// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 backlog, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0)
		vars->accu_prob = 0;
	else
		vars->accu_prob += local_prob;
	if (vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
		return true;

	get_random_bytes(&rnd, 8);
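	/* rnd is a full 64-bit random value while probabilities are kept on
	 * a MAX_PROB (U64_MAX >> BITS_PER_BYTE) scale, so shift rnd down by
	 * one byte to compare the two on the same scale.
	 */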
	if ((rnd >> BITS_PER_BYTE) < local_prob) {
		vars->accu_prob = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

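	/* Past the hard queue-limit check above, any drop below is a PIE
	 * early-drop (AQM) decision rather than a tail drop over the limit.
	 */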
	reason = SKB_DROP_REASON_QDISC_CONGESTED;

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	return qdisc_drop_reason(skb, sch, to_free, reason);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		WRITE_ONCE(q->params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		WRITE_ONCE(q->params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}

	if (tb[TCA_PIE_ALPHA])
		WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));

	if (tb[TCA_PIE_BETA])
		WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));

	if (tb[TCA_PIE_ECN])
		WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));

	if (tb[TCA_PIE_BYTEMODE])
		WRITE_ONCE(q->params.bytemode,
			   nla_get_u32(tb[TCA_PIE_BYTEMODE]));

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 backlog)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (backlog == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If current queue is about 10 packets or more and dq_count is unset,
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value, viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

			count = count / dtime;

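			/* Smooth the drain rate with an exponentially
			 * weighted moving average using a weight of 1/8:
			 * avg = 7/8 * avg + 1/8 * count.
			 */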
			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
				    (vars->avg_dq_rate -
				     (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (backlog < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 backlog)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;			/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

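		/* Queue delay is estimated as backlog / average drain rate;
		 * backlog is shifted by PIE_SCALE to match the fixed-point
		 * scale that avg_dq_rate is kept in.
		 */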
		if (vars->avg_dq_rate > 0)
			qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and backlog is not, it means backlog is very small,
	 * so we do not update probability in this round.
	 */
	if (qdelay == 0 && backlog != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
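	/* For example, a user-space alpha of 2 maps to 2/16 = 0.125 here,
	 * the typical value mentioned above.
	 */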
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (qdelay - params->target);
	delta += beta * (qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms)
	 * 250ms is derived through experiments and provides error protection
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->backlog_old = backlog;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. If average dq_rate_estimator is enabled, we have at least one
	 *    estimate for the avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = timer_container_of(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
	    nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
	    nla_put_u32(skb, TCA_PIE_ECN, READ_ONCE(q->params.ecn)) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE,
			READ_ONCE(q->params.bytemode)) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
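		/* prob is kept internally on a MAX_PROB
		 * (U64_MAX >> BITS_PER_BYTE) scale; shift it back up so the
		 * value reported to user space is relative to U64_MAX.
		 */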
		.prob = q->vars.prob << BITS_PER_BYTE,
		.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
			 NSEC_PER_USEC,
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.maxq = q->stats.maxq,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	timer_delete_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id = "pie",
	.priv_size = sizeof(struct pie_sched_data),
	.enqueue = pie_qdisc_enqueue,
	.dequeue = pie_qdisc_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = pie_init,
	.destroy = pie_destroy,
	.reset = pie_reset,
	.change = pie_change,
	.dump = pie_dump,
	.dump_stats = pie_dump_stats,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("pie");

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");