c716a81ab9
Over the years this code has gotten hairier, resulting in many long
discussions over long summer days and patches that get it wrong.
This patch helps tame that code so normal people will understand it.

Thanks to Thomas Graf, Peter J. Waskiewicz Jr, and Patrick McHardy
for their valuable reviews.

Signed-off-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

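/* Internal return codes for the transmit helpers below: DROP tells the
 * caller to free the skb, QUEUE tells it to give the skb back to the
 * qdisc for a later retry.
 */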
#define SCHED_TX_DROP -2
#define SCHED_TX_QUEUE -3

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * dev->queue_lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock dev->queue_lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->ingress_lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

void qdisc_lock_tree(struct net_device *dev)
{
	spin_lock_bh(&dev->queue_lock);
	spin_lock(&dev->ingress_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock(&dev->ingress_lock);
	spin_unlock_bh(&dev->queue_lock);
}

static inline int qdisc_qlen(struct Qdisc *q)
{
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}

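/* Another CPU raced with us for the driver's xmit lock.  If the lock
 * owner is this very CPU, a buggy driver is recursing into the xmit
 * path (dead loop); tell the caller to drop the packet.  Otherwise
 * count the collision and tell the caller to requeue.
 */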
static inline int handle_dev_cpu_collision(struct net_device *dev)
{
	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "Dead loop on netdevice %s, fix it urgently!\n",
			       dev->name);
		return SCHED_TX_DROP;
	}
	__get_cpu_var(netdev_rx_stat).cpu_collision++;
	return SCHED_TX_QUEUE;
}

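/* Give the skb back for a later retry: a partially sent GSO chain
 * (skb->next set) is parked in dev->gso_skb, anything else goes back
 * to the qdisc via its ->requeue() op; then reschedule the queue.
 */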
static inline int
do_dev_requeue(struct sk_buff *skb, struct net_device *dev, struct Qdisc *q)
{

	if (unlikely(skb->next))
		dev->gso_skb = skb;
	else
		q->ops->requeue(skb, q);
	/* XXX: Could netif_schedule fail? Or does the fact that we are
	 * requeueing imply that the hardware path is closed and that,
	 * even if we fail, some interrupt will eventually wake us?
	 */
	netif_schedule(dev);
	return 0;
}

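/* Fetch the next packet to transmit: a stored GSO chain takes
 * precedence over a fresh dequeue from the qdisc.
 */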
static inline struct sk_buff *
try_get_tx_pkt(struct net_device *dev, struct Qdisc *q)
{
	struct sk_buff *skb = dev->gso_skb;

	if (skb)
		dev->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

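/* We failed to take the driver's xmit lock: either drop the packet
 * (dead loop on this CPU) or requeue it for a later attempt.
 */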
static inline int
tx_islocked(struct sk_buff *skb, struct net_device *dev, struct Qdisc *q)
{
	int ret = handle_dev_cpu_collision(dev);

	if (ret == SCHED_TX_DROP) {
		kfree_skb(skb);
		return qdisc_qlen(q);
	}

	return do_dev_requeue(skb, dev, q);
}


/*
   NOTE: Called under dev->queue_lock with locally disabled BH.

   __LINK_STATE_QDISC_RUNNING guarantees only one CPU
   can enter this region at a time.

   dev->queue_lock serializes queue accesses for this device
   AND dev->qdisc pointer itself.

   netif_tx_lock serializes accesses to the device driver.

   dev->queue_lock and netif_tx_lock are mutually exclusive:
   if one is grabbed, the other must be free.

   Multiple CPUs may contend for the two locks.

   Note that this procedure can be called by a watchdog timer.

   Returns to the caller:
   0  - queue is empty or throttled.
   >0 - queue is not empty.
*/

static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	unsigned lockless = (dev->features & NETIF_F_LLTX);
	struct sk_buff *skb;
	int ret;

	skb = try_get_tx_pkt(dev, q);
	if (skb == NULL)
		return 0;

	/* we have a packet to send */
	if (!lockless) {
		if (!netif_tx_trylock(dev))
			return tx_islocked(skb, dev, q);
	}
	/* all clear .. */
	spin_unlock(&dev->queue_lock);

	ret = NETDEV_TX_BUSY;
	if (!netif_queue_stopped(dev))
		/* churn baby churn .. */
		ret = dev_hard_start_xmit(skb, dev);

	if (!lockless)
		netif_tx_unlock(dev);

	spin_lock(&dev->queue_lock);

	/* we need to refresh q because it may be invalid since
	 * we dropped dev->queue_lock earlier ...
	 * So don't try to be clever, grasshopper.
	 */
	q = dev->qdisc;
	/* most likely result, packet went ok */
	if (ret == NETDEV_TX_OK)
		return qdisc_qlen(q);
	/* only for lockless drivers .. */
	if (ret == NETDEV_TX_LOCKED && lockless)
		return tx_islocked(skb, dev, q);

	if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
		printk(KERN_WARNING "BUG %s code %d qlen %d\n",
		       dev->name, ret, q->q.qlen);

	return do_dev_requeue(skb, dev, q);
}

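/* Transmit for as long as qdisc_restart() keeps making progress and
 * the device queue is not stopped, then clear the RUNNING bit so
 * another CPU may enter the transmit path.
 */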
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

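/* Watchdog timer callback: if the device is present, up and has
 * carrier but its queue has been stopped for longer than
 * watchdog_timeo, report a transmit timeout to the driver and
 * re-arm the timer.
 */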
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		= "noop",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noop_qdisc_ops,
	.list		= LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		= "noqueue",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	= NULL,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noqueue_qdisc_ops,
	.list		= LIST_HEAD_INIT(noqueue_qdisc.list),
};

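/* Map skb->priority (masked with TC_PRIO_MAX) onto the three
 * pfifo_fast bands; band 0 is dequeued first, band 2 last.
 */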
static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

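/* Tail-enqueue onto the band chosen by skb->priority; once that band
 * already holds tx_queue_len packets the skb is dropped.
 */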
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

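/* Put the skb back at the head of its band so it is the next packet
 * dequeued from that band.
 */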
static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		= "pfifo_fast",
	.priv_size	= PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	= pfifo_fast_enqueue,
	.dequeue	= pfifo_fast_dequeue,
	.requeue	= pfifo_fast_requeue,
	.init		= pfifo_fast_init,
	.reset		= pfifo_fast_reset,
	.dump		= pfifo_fast_dump,
	.owner		= THIS_MODULE,
};

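/* Allocate a zeroed Qdisc together with its private area, align both
 * to QDISC_ALIGNTO bytes and take a reference on the owning device.
 */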
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	/* err is already negative; negating it here would defeat IS_ERR() */
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->stats_lock = &dev->queue_lock;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* This is the RCU callback function to clean up a qdisc when there
 * are no further references to it.
 */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc->dev);
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for virtual
	   interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
						  TC_H_ROOT);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			list_add_tail(&qdisc->list, &dev->qdisc_list);
		} else {
			qdisc = &noqueue_qdisc;
		}
		dev->qdisc_sleeping = qdisc;
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);