From 3017050eec2c0fe192625b45abdb667137667ec3 Mon Sep 17 00:00:00 2001
From: Greg Rose
Date: Fri, 17 Jan 2025 08:44:22 -0800
Subject: [PATCH 1/2] github actions: Use python PR check script

jira LE-2214

Obsoletes the old ruby PR check script
---
 .github/workflows/process-git-request.py   | 134 ++++++++++++++++++++
 .github/workflows/process-git-request.rb   | 140 ---------------------
 .github/workflows/process-pull-request.yml |  48 +++++++
 3 files changed, 182 insertions(+), 140 deletions(-)
 create mode 100644 .github/workflows/process-git-request.py
 delete mode 100644 .github/workflows/process-git-request.rb
 create mode 100644 .github/workflows/process-pull-request.yml

diff --git a/.github/workflows/process-git-request.py b/.github/workflows/process-git-request.py
new file mode 100644
index 0000000000000..554dbec138aa8
--- /dev/null
+++ b/.github/workflows/process-git-request.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+
+import sys
+import subprocess
+import os
+import re
+import git
+
+def log_commits_between_branches(repo_path, from_branch, to_branch):
+    repo = git.Repo(repo_path)
+
+    # Get the common ancestor of the two branches
+    common_ancestor = repo.merge_base(from_branch, to_branch)
+
+    print(f"Common ancestor is {common_ancestor}")
+
+    # Get the commits in 'from_branch' that are not in 'to_branch'
+    commits = list(repo.iter_commits(f"{to_branch}..{from_branch}"))
+
+#    for commit in commits:
+#        print(commit.hexsha, commit.message.strip())
+
+def file_prepend(file, str):
+    with open(file, 'r') as fd:
+        contents = fd.read()
+    new_contents = str + contents
+
+    # Overwrite file but now with prepended string on it
+    with open(file, 'w') as fd:
+        fd.write(new_contents)
+
+def process_git_request(fname, target_branch, source_branch, prj_dir):
+    retcode = 0  # presume success
+    file = open(fname, "w")
+    working_dir = prj_dir
+    os.chdir(working_dir)
+
+    repo = git.Repo(".")
+    commits = repo.iter_commits(f"{target_branch}..{source_branch}")
+    loglines_to_check = 13
+    for commit in commits:
+        print(f"{commit.hexsha} {commit.message.splitlines()[0]}")
+
+        commit_sha = commit.hexsha
+
+        git_cmd = "git show " + commit_sha
+        gitlog_out, gitlog_err = subprocess.Popen(git_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
+
+        loglines = gitlog_out.splitlines()
+        lines_counted = 0
+        local_diffdiff_sha = commit_sha
+        upstream_diffdiff_sha = ""
+        upstream_diff = False
+
+        for logline in loglines:
+#            print(f"Processing logline {commit_sha}")
+            lines_counted += 1
+            if lines_counted == 1:
+                file.write("Merge Request sha: " + local_diffdiff_sha)
+                file.write("\n")
+            if lines_counted == 2:  # email address
+                if "ciq.com" not in logline.lower():
+                    # Bad Author
+                    s = f"error:\nBad {logline}\n"
+                    print(s)
+                    file.write(s)
+                    file.close()
+                    return 1
+            if lines_counted > 1:
+                if "jira" in logline.lower():
+                    file.write("\t" + logline + "\n")
+
+                if "upstream-diff" in logline.lower():
+                    upstream_diff = True
+
+                if "commit" in logline.lower():
+                    local_commit_sha = re.search(r'[0-9a-f]{40}', logline)
+                    upstream_diffdiff_sha = str(local_commit_sha.group(0)) if local_commit_sha else ""
+                    if upstream_diffdiff_sha:
+                        print(f"Upstream : " + upstream_diffdiff_sha)
+                        file.write("\tUpstream sha: " + upstream_diffdiff_sha)
+                        file.write("\n")
+
+            if lines_counted > loglines_to_check:  # Everything we need should be in the first loglines_to_check lines
+#                print(f"Breaking after {loglines_to_check} lines of commit checking")
+                break
+
+        if local_diffdiff_sha and upstream_diffdiff_sha:
+            diff_cmd = os.path.join(os.getcwd(), ".github/workflows/diffdiff.py") + " --colour --commit " + local_diffdiff_sha
+#            print("diffdiff: " + diff_cmd)
+            process = subprocess.run(diff_cmd, shell=True, capture_output=True, text=True)
+            diff_out = process.stdout
+            diff_err = process.stderr
+            diff_status = process.returncode
+
+            if diff_status != 0 and not upstream_diff:
+                print(f"diffdiff out: " + diff_out)
+                print(f"diffdiff err: " + diff_err)
+                retcode = 1
+                file.write("error:\nCommit: " + local_diffdiff_sha + " differs with no upstream tag in commit message\n")
+
+    return retcode
+
+first_arg, *argv_in = sys.argv[1:]  # Skip script name in sys.argv
+
+if len(argv_in) < 5:
+    print("Not enough arguments: fname, target_branch, source_branch, prj_dir, pull_request, requestor")
+    sys.exit(1)
+
+fname = str(first_arg)
+fname = "tmp-" + fname
+# print("filename is " + fname)
+target_branch = str(argv_in[0])
+# print("target branch is " + target_branch)
+source_branch = str(argv_in[1])
+# print("source branch is " + source_branch)
+prj_dir = str(argv_in[2])
+# print("project dir is " + prj_dir)
+pullreq = str(argv_in[3])
+# print("pull request is " + pullreq)
+requestor = str(argv_in[4])
+
+retcode = process_git_request(fname, target_branch, source_branch, prj_dir)
+
+if retcode != 0:
+    with open(fname, 'r') as fd:
+        contents = fd.read()
+        print(contents)
+    sys.exit(1)
+else:
+    print("Done")
+
+sys.exit(0)
+
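
For context, not part of the patch itself: a minimal, self-contained sketch of the per-commit checks the script above applies. Line 2 of "git show" output (the author line) must contain a ciq.com address, a "jira" tag must be present, and an upstream "commit <sha>" reference is picked out with a 40-hex-digit regex. The sample message, author, and SHAs below are hypothetical.

    # Illustrative sketch only; the sample message and SHAs are made up.
    import re

    sample = """commit 0123456789abcdef0123456789abcdef01234567
    Author: Jane Doe <jdoe@ciq.com>

        net: sched: example backport

        jira LE-0000
        commit 89abcdef0123456789abcdef0123456789abcdef
    """

    lines = sample.splitlines()
    assert "ciq.com" in lines[1].lower()                       # author domain check
    assert any("jira" in line.lower() for line in lines[2:])   # jira tag present
    match = re.search(r'[0-9a-f]{40}', "\n".join(lines[2:]))   # upstream sha
    print("Upstream sha:", match.group(0) if match else "none")
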
"ciq.com" - # Bad Author - s = "error:\nBad " + logline + "\n" - puts s - file.write(s) - retcode = 201 - else - file.write("\t" + logline + "\n") - end - end - if lines_counted > 1 - if logline.downcase.include? "jira" - file.write("\t" + logline + "\n") - end - if logline.downcase.include? "upstream-diff" - upstream_diff = true - end - if logline.downcase.include? "commit" - commit_sha = logline.match("[0-9a-f]\{40\}") - upstream_diffdiff_sha = commit_sha.to_s -# puts "Upstream : " + upstream_diffdiff_sha - if (!upstream_diffdiff_sha.empty?) - file.write("\tUpstream sha: " + upstream_diffdiff_sha) - file.write("\n") - end - end - end - if lines_counted > 8 #Everything we need should be in the first 8 lines - break - end - } - if !local_diffdiff_sha.empty? && !upstream_diffdiff_sha.empty? - diff_cmd = Dir.pwd + "/.github/workflows/diffdiff.py --colour --commit " + local_diffdiff_sha - puts "diffdiff: " + diff_cmd - diff_out, diff_err, diff_status = Open3.capture3(diff_cmd) - if diff_status.exitstatus != 0 && !upstream_diff - puts "diffdiff out: " + diff_out - puts "diffdiff err: " + diff_err - retcode = 201 - file.write("error:\nCommit: " + local_diffdiff_sha + " differs with no upstream tag in commit message\n") - end - end - end - } - file.close - return retcode -end - -first_arg, *argv_in = ARGV -if argv_in.length < 5 - puts "Not enough arguments: fname, target_branch, source_branch, prj_dir, pull_request, requestor" - exit -end -fname = first_arg.to_s -fname = "tmp-" + fname -# puts "filename is " + fname -target_branch = argv_in[0].to_s -# puts "target branch is " + target_branch -source_branch = argv_in[1].to_s -# puts "source branch is " + source_branch -prj_dir = argv_in[2].to_s -# puts "project dir is " + prj_dir -pullreq = argv_in[3].to_s -# puts "pull request is " + pullreq -requestor = argv_in[4].to_s -retcode = process_git_request(fname, target_branch, source_branch, prj_dir) -if retcode != 200 - File.open(fname, 'r') do |fd| - contents = fd.read - puts contents - end - exit(1) -else - puts "Done" -end -exit(0) - diff --git a/.github/workflows/process-pull-request.yml b/.github/workflows/process-pull-request.yml new file mode 100644 index 0000000000000..90fe72473d368 --- /dev/null +++ b/.github/workflows/process-pull-request.yml @@ -0,0 +1,48 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
From 998b4246fb2aca3b315464446dab949221a299da Mon Sep 17 00:00:00 2001
From: Greg Rose
Date: Fri, 17 Jan 2025 10:49:17 -0800
Subject: [PATCH 2/2] net: sched: sch_qfq: Fix UAF in qfq_dequeue()

jira VULN-6730
cve CVE-2023-4921
commit-author valis
commit 8fc134fee27f2263988ae38920bc03da416b03d8

When the plug qdisc is used as a class of the qfq qdisc it
could trigger a UAF. This issue can be reproduced with following commands:

tc qdisc add dev lo root handle 1: qfq
tc class add dev lo parent 1: classid 1:1 qfq weight 1 maxpkt 512
tc qdisc add dev lo parent 1:1 handle 2: plug
tc filter add dev lo parent 1: basic classid 1:1
ping -c1 127.0.0.1

and boom:

[ 285.353793] BUG: KASAN: slab-use-after-free in qfq_dequeue+0xa7/0x7f0
[ 285.354910] Read of size 4 at addr ffff8880bad312a8 by task ping/144
[ 285.355903]
[ 285.356165] CPU: 1 PID: 144 Comm: ping Not tainted 6.5.0-rc3+ #4
[ 285.357112] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
[ 285.358376] Call Trace:
[ 285.358773]
[ 285.359109] dump_stack_lvl+0x44/0x60
[ 285.359708] print_address_description.constprop.0+0x2c/0x3c0
[ 285.360611] kasan_report+0x10c/0x120
[ 285.361195] ? qfq_dequeue+0xa7/0x7f0
[ 285.361780] qfq_dequeue+0xa7/0x7f0
[ 285.362342] __qdisc_run+0xf1/0x970
[ 285.362903] net_tx_action+0x28e/0x460
[ 285.363502] __do_softirq+0x11b/0x3de
[ 285.364097] do_softirq.part.0+0x72/0x90
[ 285.364721]
[ 285.365072]
[ 285.365422] __local_bh_enable_ip+0x77/0x90
[ 285.366079] __dev_queue_xmit+0x95f/0x1550
[ 285.366732] ? __pfx_csum_and_copy_from_iter+0x10/0x10
[ 285.367526] ? __pfx___dev_queue_xmit+0x10/0x10
[ 285.368259] ? __build_skb_around+0x129/0x190
[ 285.368960] ? ip_generic_getfrag+0x12c/0x170
[ 285.369653] ? __pfx_ip_generic_getfrag+0x10/0x10
[ 285.370390] ? csum_partial+0x8/0x20
[ 285.370961] ? raw_getfrag+0xe5/0x140
[ 285.371559] ip_finish_output2+0x539/0xa40
[ 285.372222] ? __pfx_ip_finish_output2+0x10/0x10
[ 285.372954] ip_output+0x113/0x1e0
[ 285.373512] ? __pfx_ip_output+0x10/0x10
[ 285.374130] ? icmp_out_count+0x49/0x60
[ 285.374739] ? __pfx_ip_finish_output+0x10/0x10
[ 285.375457] ip_push_pending_frames+0xf3/0x100
[ 285.376173] raw_sendmsg+0xef5/0x12d0
[ 285.376760] ? do_syscall_64+0x40/0x90
[ 285.377359] ? __static_call_text_end+0x136578/0x136578
[ 285.378173] ? do_syscall_64+0x40/0x90
[ 285.378772] ? kasan_enable_current+0x11/0x20
[ 285.379469] ? __pfx_raw_sendmsg+0x10/0x10
[ 285.380137] ? __sock_create+0x13e/0x270
[ 285.380673] ? __sys_socket+0xf3/0x180
[ 285.381174] ? __x64_sys_socket+0x3d/0x50
[ 285.381725] ? entry_SYSCALL_64_after_hwframe+0x6e/0xd8
[ 285.382425] ? __rcu_read_unlock+0x48/0x70
[ 285.382975] ? ip4_datagram_release_cb+0xd8/0x380
[ 285.383608] ? __pfx_ip4_datagram_release_cb+0x10/0x10
[ 285.384295] ? preempt_count_sub+0x14/0xc0
[ 285.384844] ? __list_del_entry_valid+0x76/0x140
[ 285.385467] ? _raw_spin_lock_bh+0x87/0xe0
[ 285.386014] ? __pfx__raw_spin_lock_bh+0x10/0x10
[ 285.386645] ? release_sock+0xa0/0xd0
[ 285.387148] ? preempt_count_sub+0x14/0xc0
[ 285.387712] ? freeze_secondary_cpus+0x348/0x3c0
[ 285.388341] ? aa_sk_perm+0x177/0x390
[ 285.388856] ? __pfx_aa_sk_perm+0x10/0x10
[ 285.389441] ? check_stack_object+0x22/0x70
[ 285.390032] ? inet_send_prepare+0x2f/0x120
[ 285.390603] ? __pfx_inet_sendmsg+0x10/0x10
[ 285.391172] sock_sendmsg+0xcc/0xe0
[ 285.391667] __sys_sendto+0x190/0x230
[ 285.392168] ? __pfx___sys_sendto+0x10/0x10
[ 285.392727] ? kvm_clock_get_cycles+0x14/0x30
[ 285.393328] ? set_normalized_timespec64+0x57/0x70
[ 285.393980] ? _raw_spin_unlock_irq+0x1b/0x40
[ 285.394578] ? __x64_sys_clock_gettime+0x11c/0x160
[ 285.395225] ? __pfx___x64_sys_clock_gettime+0x10/0x10
[ 285.395908] ? _copy_to_user+0x3e/0x60
[ 285.396432] ? exit_to_user_mode_prepare+0x1a/0x120
[ 285.397086] ? syscall_exit_to_user_mode+0x22/0x50
[ 285.397734] ? do_syscall_64+0x71/0x90
[ 285.398258] __x64_sys_sendto+0x74/0x90
[ 285.398786] do_syscall_64+0x64/0x90
[ 285.399273] ? exit_to_user_mode_prepare+0x1a/0x120
[ 285.399949] ? syscall_exit_to_user_mode+0x22/0x50
[ 285.400605] ? do_syscall_64+0x71/0x90
[ 285.401124] entry_SYSCALL_64_after_hwframe+0x6e/0xd8
[ 285.401807] RIP: 0033:0x495726
[ 285.402233] Code: ff ff ff f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 11 b8 2c 00 00 00 0f 09
[ 285.404683] RSP: 002b:00007ffcc25fb618 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 285.405677] RAX: ffffffffffffffda RBX: 0000000000000040 RCX: 0000000000495726
[ 285.406628] RDX: 0000000000000040 RSI: 0000000002518750 RDI: 0000000000000000
[ 285.407565] RBP: 00000000005205ef R08: 00000000005f8838 R09: 000000000000001c
[ 285.408523] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000002517634
[ 285.409460] R13: 00007ffcc25fb6f0 R14: 0000000000000003 R15: 0000000000000000
[ 285.410403]
[ 285.410704]
[ 285.410929] Allocated by task 144:
[ 285.411402]  kasan_save_stack+0x1e/0x40
[ 285.411926]  kasan_set_track+0x21/0x30
[ 285.412442]  __kasan_slab_alloc+0x55/0x70
[ 285.412973]  kmem_cache_alloc_node+0x187/0x3d0
[ 285.413567]  __alloc_skb+0x1b4/0x230
[ 285.414060]  __ip_append_data+0x17f7/0x1b60
[ 285.414633]  ip_append_data+0x97/0xf0
[ 285.415144]  raw_sendmsg+0x5a8/0x12d0
[ 285.415640]  sock_sendmsg+0xcc/0xe0
[ 285.416117]  __sys_sendto+0x190/0x230
[ 285.416626]  __x64_sys_sendto+0x74/0x90
[ 285.417145]  do_syscall_64+0x64/0x90
[ 285.417624]  entry_SYSCALL_64_after_hwframe+0x6e/0xd8
[ 285.418306]
[ 285.418531] Freed by task 144:
[ 285.418960]  kasan_save_stack+0x1e/0x40
[ 285.419469]  kasan_set_track+0x21/0x30
[ 285.419988]  kasan_save_free_info+0x27/0x40
[ 285.420556]  ____kasan_slab_free+0x109/0x1a0
[ 285.421146]  kmem_cache_free+0x1c2/0x450
[ 285.421680]  __netif_receive_skb_core+0x2ce/0x1870
[ 285.422333]  __netif_receive_skb_one_core+0x97/0x140
[ 285.423003]  process_backlog+0x100/0x2f0
[ 285.423537]  __napi_poll+0x5c/0x2d0
[ 285.424023]  net_rx_action+0x2be/0x560
[ 285.424510]  __do_softirq+0x11b/0x3de
[ 285.425034]
[ 285.425254] The buggy address belongs to the object at ffff8880bad31280
[ 285.425254]  which belongs to the cache skbuff_head_cache of size 224
[ 285.426993] The buggy address is located 40 bytes inside of
[ 285.426993]  freed 224-byte region [ffff8880bad31280, ffff8880bad31360)
[ 285.428572]
[ 285.428798] The buggy address belongs to the physical page:
[ 285.429540] page:00000000f4b77674 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0xbad31
[ 285.430758] flags: 0x100000000000200(slab|node=0|zone=1)
[ 285.431447] page_type: 0xffffffff()
[ 285.431934] raw: 0100000000000200 ffff88810094a8c0 dead000000000122 0000000000000000
[ 285.432757] raw: 0000000000000000 00000000800c000c 00000001ffffffff 0000000000000000
[ 285.433562] page dumped because: kasan: bad access detected
[ 285.434144]
[ 285.434320] Memory state around the buggy address:
[ 285.434828]  ffff8880bad31180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 285.435580]  ffff8880bad31200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 285.436264] >ffff8880bad31280: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 285.436777]                                    ^
[ 285.437106]  ffff8880bad31300: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
[ 285.437616]  ffff8880bad31380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 285.438126] ==================================================================
[ 285.438662] Disabling lock debugging due to kernel taint

Fix this by:
1. Changing sch_plug's .peek handler to qdisc_peek_dequeued(), a
   function compatible with non-work-conserving qdiscs
2. Checking the return value of qdisc_dequeue_peeked() in sch_qfq.

Fixes: 462dbc9101ac ("pkt_sched: QFQ Plus: fair-queueing service at DRR cost")
Reported-by: valis
Signed-off-by: valis
Signed-off-by: Jamal Hadi Salim
Link: https://lore.kernel.org/r/20230901162237.11525-1-jhs@mojatatu.com
Signed-off-by: Paolo Abeni
(cherry picked from commit 8fc134fee27f2263988ae38920bc03da416b03d8)
Signed-off-by: Greg Rose
---
 net/sched/sch_plug.c |  2 +-
 net/sched/sch_qfq.c  | 22 +++++++++++++++++-----
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 5619d2eb17b69..4ddb4af61d103 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -214,7 +214,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
 	.priv_size   =       sizeof(struct plug_sched_data),
 	.enqueue     =       plug_enqueue,
 	.dequeue     =       plug_dequeue,
-	.peek        =       qdisc_peek_head,
+	.peek        =       qdisc_peek_dequeued,
 	.init        =       plug_init,
 	.change      =       plug_change,
 	.reset       =       qdisc_reset_queue,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e6d1e297a51dd..15d2f8f083038 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -973,10 +973,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
 }
 
 /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
-static void agg_dequeue(struct qfq_aggregate *agg,
-			struct qfq_class *cl, unsigned int len)
+static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+				   struct qfq_class *cl, unsigned int len)
 {
-	qdisc_dequeue_peeked(cl->qdisc);
+	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
+
+	if (!skb)
+		return NULL;
 
 	cl->deficit -= (int) len;
 
@@ -986,6 +989,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
 		cl->deficit += agg->lmax;
 		list_move_tail(&cl->alist, &agg->active);
 	}
+
+	return skb;
 }
 
 static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
@@ -1131,11 +1136,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 	if (!skb)
 		return NULL;
 
-	qdisc_qstats_backlog_dec(sch, skb);
 	sch->q.qlen--;
+
+	skb = agg_dequeue(in_serv_agg, cl, len);
+
+	if (!skb) {
+		sch->q.qlen++;
+		return NULL;
+	}
+
+	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_bstats_update(sch, skb);
 
-	agg_dequeue(in_serv_agg, cl, len);
 	/* If lmax is lowered, through qfq_change_class, for a class
 	 * owning pending packets with larger size than the new value
 	 * of lmax, then the following condition may hold.