linux/drivers/net/usb/cdc_ncm.c

/*
* cdc_ncm.c
*
* Copyright (C) ST-Ericsson 2010-2012
* Contact: Alexey Orishko <alexey.orishko@stericsson.com>
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
* USB Host Driver for Network Control Model (NCM)
* http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
*
* The NCM encoding, decoding and initialization logic
* derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
*
* This software is available to you under a choice of one of two
* licenses. You may choose this file to be licensed under the terms
* of the GNU General Public License (GPL) Version 2 or the 2-clause
* BSD license listed below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/hrtimer.h>
#include <linux/atomic.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
#include <linux/usb/cdc_ncm.h>
#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
static bool prefer_mbim = true;
#else
static bool prefer_mbim;
#endif
module_param(prefer_mbim, bool, 0644);
MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
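/* prefer_mbim is writable at runtime (mode 0644).  As an illustration (the
 * path is the standard module-parameter location in sysfs), it can be
 * changed before a dual NCM/MBIM function is probed:
 *
 *   echo N > /sys/module/cdc_ncm/parameters/prefer_mbim
 */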
static void cdc_ncm_txpath_bh(struct tasklet_struct *t);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
struct cdc_ncm_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define CDC_NCM_STAT(str, m) { \
.stat_string = str, \
.sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
.stat_offset = offsetof(struct cdc_ncm_ctx, m) }
#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
CDC_NCM_SIMPLE_STAT(tx_overhead),
CDC_NCM_SIMPLE_STAT(tx_ntbs),
CDC_NCM_SIMPLE_STAT(rx_overhead),
CDC_NCM_SIMPLE_STAT(rx_ntbs),
};
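/* These per-context counters are exported through the standard ethtool
 * statistics interface (see cdc_ncm_get_ethtool_stats() below) and can be
 * read from userspace with e.g. "ethtool -S <iface>".  The tx_reason_*
 * counters record why an NTB was dispatched: the NTB filled up, its NDP
 * filled up, the coalescing timer fired, or the datagram limit was reached.
 */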
/* Low-memory TX fallback: if a full-size NTB cannot be allocated atomically
 * (e.g. because memory has become fragmented), the TX path temporarily
 * switches to NTBs that fit in a USB_CDC_NCM_NTB_MIN_OUT_SIZE allocation and
 * carry a single datagram each.  After every such failure an increasing
 * number of reduced-size allocations is used before a full-size allocation
 * is attempted again, bounded by this count.
 */
#define CDC_NCM_LOW_MEM_MAX_CNT 10
static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(cdc_ncm_gstrings_stats);
default:
return -EOPNOTSUPP;
}
}
static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats __always_unused *stats,
u64 *data)
{
struct usbnet *dev = netdev_priv(netdev);
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
int i;
char *p = NULL;
for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
{
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
}
static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
.get_sset_count = cdc_ncm_get_sset_count,
.get_strings = cdc_ncm_get_strings,
.get_ethtool_stats = cdc_ncm_get_ethtool_stats,
.get_link_ksettings = usbnet_get_link_ksettings_internal,
.set_link_ksettings = NULL,
};
static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 val, max, min;
/* clamp new_rx to sane values */
min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
/* dwNtbInMaxSize spec violation? Use MIN size for both limits */
if (max < min) {
dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
max = min;
}
val = clamp_t(u32, new_rx, min, max);
if (val != new_rx)
dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range\n", min, max);
return val;
}
static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 val, max, min;
/* clamp new_tx to sane values */
if (ctx->is_ndp16)
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
else
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
else
max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
USB_CDC_NCM_NTB_MIN_OUT_SIZE,
CDC_NCM_NTB_MAX_SIZE_TX);
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
val = clamp_t(u32, new_tx, min, max);
if (val != new_tx)
dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range\n", min, max);
return val;
}
static ssize_t min_tx_pkt_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->min_tx_pkt);
}
static ssize_t rx_max_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->rx_max);
}
static ssize_t tx_max_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->tx_max);
}
static ssize_t tx_timer_usecs_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
}
static ssize_t min_tx_pkt_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
unsigned long val;
/* no need to restrict values - anything from 0 to infinity is OK */
if (kstrtoul(buf, 0, &val))
return -EINVAL;
ctx->min_tx_pkt = val;
return len;
}
static ssize_t rx_max_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
unsigned long val;
if (kstrtoul(buf, 0, &val) || cdc_ncm_check_rx_max(dev, val) != val)
return -EINVAL;
cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max);
return len;
}
static ssize_t tx_max_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
unsigned long val;
if (kstrtoul(buf, 0, &val) || cdc_ncm_check_tx_max(dev, val) != val)
return -EINVAL;
cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val);
return len;
}
static ssize_t tx_timer_usecs_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
ssize_t ret;
unsigned long val;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val && (val < CDC_NCM_TIMER_INTERVAL_MIN || val > CDC_NCM_TIMER_INTERVAL_MAX))
return -EINVAL;
spin_lock_bh(&ctx->mtx);
ctx->timer_interval = val * NSEC_PER_USEC;
if (!ctx->timer_interval)
ctx->tx_timer_pending = 0;
spin_unlock_bh(&ctx->mtx);
return len;
}
static DEVICE_ATTR_RW(min_tx_pkt);
static DEVICE_ATTR_RW(rx_max);
static DEVICE_ATTR_RW(tx_max);
static DEVICE_ATTR_RW(tx_timer_usecs);
static ssize_t ndp_to_end_show(struct device *d, struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%c\n", ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END ? 'Y' : 'N');
}
static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
bool enable;
if (kstrtobool(buf, &enable))
return -EINVAL;
/* no change? */
if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
return len;
if (enable) {
if (ctx->is_ndp16 && !ctx->delayed_ndp16) {
ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
if (!ctx->delayed_ndp16)
return -ENOMEM;
}
if (!ctx->is_ndp16 && !ctx->delayed_ndp32) {
ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
if (!ctx->delayed_ndp32)
return -ENOMEM;
}
}
/* flush pending data before changing flag */
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
spin_lock_bh(&ctx->mtx);
if (enable)
ctx->drvflags |= CDC_NCM_FLAG_NDP_TO_END;
else
ctx->drvflags &= ~CDC_NCM_FLAG_NDP_TO_END;
spin_unlock_bh(&ctx->mtx);
netif_tx_unlock_bh(dev->net);
return len;
}
static DEVICE_ATTR_RW(ndp_to_end);
#define NCM_PARM_ATTR(name, format, tocpu) \
static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \
{ \
struct usbnet *dev = netdev_priv(to_net_dev(d)); \
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
} \
static DEVICE_ATTR(name, 0444, cdc_ncm_show_##name, NULL)
NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
NCM_PARM_ATTR(wNdpInDivisor, "%u", le16_to_cpu);
NCM_PARM_ATTR(wNdpInPayloadRemainder, "%u", le16_to_cpu);
NCM_PARM_ATTR(wNdpInAlignment, "%u", le16_to_cpu);
NCM_PARM_ATTR(dwNtbOutMaxSize, "%u", le32_to_cpu);
NCM_PARM_ATTR(wNdpOutDivisor, "%u", le16_to_cpu);
NCM_PARM_ATTR(wNdpOutPayloadRemainder, "%u", le16_to_cpu);
NCM_PARM_ATTR(wNdpOutAlignment, "%u", le16_to_cpu);
NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu);
static struct attribute *cdc_ncm_sysfs_attrs[] = {
&dev_attr_min_tx_pkt.attr,
&dev_attr_ndp_to_end.attr,
&dev_attr_rx_max.attr,
&dev_attr_tx_max.attr,
&dev_attr_tx_timer_usecs.attr,
&dev_attr_bmNtbFormatsSupported.attr,
&dev_attr_dwNtbInMaxSize.attr,
&dev_attr_wNdpInDivisor.attr,
&dev_attr_wNdpInPayloadRemainder.attr,
&dev_attr_wNdpInAlignment.attr,
&dev_attr_dwNtbOutMaxSize.attr,
&dev_attr_wNdpOutDivisor.attr,
&dev_attr_wNdpOutPayloadRemainder.attr,
&dev_attr_wNdpOutAlignment.attr,
&dev_attr_wNtbOutMaxDatagrams.attr,
NULL,
};
static const struct attribute_group cdc_ncm_sysfs_attr_group = {
.name = "cdc_ncm",
.attrs = cdc_ncm_sysfs_attrs,
};
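/* The "cdc_ncm" attribute group is attached to the network device (see
 * cdc_ncm_bind_common(), which sets dev->net->sysfs_groups[0]), so these
 * aggregation knobs appear under /sys/class/net/<iface>/cdc_ncm/.  A usage
 * sketch with an illustrative interface name:
 *
 *   cat /sys/class/net/wwan0/cdc_ncm/tx_max
 *   echo 16384 > /sys/class/net/wwan0/cdc_ncm/rx_max
 */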
/* handle rx_max and tx_max changes */
static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
u32 val;
val = cdc_ncm_check_rx_max(dev, new_rx);
/* inform device about NTB input size changes */
if (val != ctx->rx_max) {
__le32 dwNtbInMaxSize = cpu_to_le32(val);
dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
/* tell device to use new size */
if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
0, iface_no, &dwNtbInMaxSize, 4) < 0)
dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
else
ctx->rx_max = val;
}
/* usbnet uses these values for sizing rx queues */
if (dev->rx_urb_size != ctx->rx_max) {
dev->rx_urb_size = ctx->rx_max;
if (netif_running(dev->net))
usbnet_unlink_rx_urbs(dev);
}
val = cdc_ncm_check_tx_max(dev, new_tx);
if (val != ctx->tx_max)
dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
/* Adding a pad byte here if necessary simplifies the handling
* in cdc_ncm_fill_tx_frame, making tx_max always represent
* the real skb max size.
*
* We cannot use dev->maxpacket here because this is called from
* .bind which is called before usbnet sets up dev->maxpacket
*/
if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
val % usb_maxpacket(dev->udev, dev->out) == 0)
val++;
/* we might need to flush any pending tx buffers if running */
if (netif_running(dev->net) && val > ctx->tx_max) {
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
/* make sure tx_curr_skb is reallocated if it was empty */
if (ctx->tx_curr_skb) {
dev_kfree_skb_any(ctx->tx_curr_skb);
ctx->tx_curr_skb = NULL;
}
ctx->tx_max = val;
netif_tx_unlock_bh(dev->net);
} else {
ctx->tx_max = val;
}
dev->hard_mtu = ctx->tx_max;
/* max qlen depends on hard_mtu and rx_urb_size */
usbnet_update_max_qlen(dev);
/* never pad more than 3 full USB packets per transfer */
ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out),
CDC_NCM_MIN_TX_PKT, ctx->tx_max);
}
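/* Illustrative arithmetic for the min_tx_pkt clamp above (a sketch only;
 * endpoint packet sizes vary): with a 512-byte high-speed bulk OUT endpoint
 * and tx_max = 16384, min_tx_pkt becomes 16384 - 3 * 512 = 14848, i.e. an
 * NTB is only padded out to the full tx_max when it is already within three
 * USB packets of being full.
 */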
/* helpers for NCM and MBIM differences */
static u8 cdc_ncm_flags(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
return ctx->mbim_desc->bmNetworkCapabilities;
if (ctx->func_desc)
return ctx->func_desc->bmNetworkCapabilities;
return 0;
}
static int cdc_ncm_eth_hlen(struct usbnet *dev)
{
if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
return 0;
return ETH_HLEN;
}
static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
{
if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
return CDC_MBIM_MIN_DATAGRAM_SIZE;
return CDC_NCM_MIN_DATAGRAM_SIZE;
}
static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
if (ctx->ether_desc)
return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
return CDC_NCM_MAX_DATAGRAM_SIZE;
}
/* initial one-time device setup. MUST be called with the data interface
* in altsetting 0
*/
static int cdc_ncm_init(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
int err;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
0, iface_no, &ctx->ncm_parm,
sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* set CRC Mode */
if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
USB_CDC_NCM_CRC_NOT_APPENDED,
iface_no, NULL, 0);
if (err < 0)
dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
}
/* use ndp16 by default */
ctx->is_ndp16 = 1;
/* set NTB format, if both formats are supported.
*
* "The host shall only send this command while the NCM Data
* Interface is in alternate setting 0."
*/
if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
USB_CDC_NCM_NTB32_SUPPORTED) {
if (ctx->drvflags & CDC_NCM_FLAG_PREFER_NTB32) {
ctx->is_ndp16 = 0;
dev_dbg(&dev->intf->dev, "Setting NTB format to 32-bit\n");
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
USB_CDC_NCM_NTB32_FORMAT,
iface_no, NULL, 0);
} else {
ctx->is_ndp16 = 1;
dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
USB_CDC_NCM_NTB16_FORMAT,
iface_no, NULL, 0);
}
if (err < 0) {
ctx->is_ndp16 = 1;
dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
}
}
/* set initial device values */
ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
dev_dbg(&dev->intf->dev,
"dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* set up maximum NDP size */
if (ctx->is_ndp16)
ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
else
ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp32) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe32);
/* initial coalescing timer interval */
ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
return 0;
}
/* set a new max datagram size */
static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
__le16 max_datagram_size;
u16 mbim_mtu;
int err;
/* set default based on descriptors */
ctx->max_datagram_size = clamp_t(u32, new_size,
cdc_ncm_min_dgram_size(dev),
CDC_NCM_MAX_DATAGRAM_SIZE);
/* inform the device about the selected Max Datagram Size? */
if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
goto out;
/* read current mtu value from device */
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}
if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
goto out;
max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
if (err < 0)
dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
out:
/* set MTU to max supported by the device if necessary */
dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
/* do not exceed operator preferred MTU */
if (ctx->mbim_extended_desc) {
mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
dev->net->mtu = mbim_mtu;
}
}
static void cdc_ncm_fix_modulus(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 val;
/*
* verify that the structure alignment is:
* - power of two
* - not greater than the maximum transmit length
* - not less than four bytes
*/
val = ctx->tx_ndp_modulus;
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n");
ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
/*
* verify that the payload alignment is:
* - power of two
* - not greater than the maximum transmit length
* - not less than four bytes
*/
val = ctx->tx_modulus;
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n");
ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
/* verify the payload remainder */
if (ctx->tx_remainder >= ctx->tx_modulus) {
dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n");
ctx->tx_remainder = 0;
}
/* adjust TX-remainder according to NCM specification. */
ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
(ctx->tx_modulus - 1));
}
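/* Worked example of the remainder adjustment above (illustrative values):
 * with tx_modulus = 4, tx_remainder = 2 and a 14-byte Ethernet header, the
 * stored remainder becomes (2 - 14) & 3 = 0.  Padding the buffer tail to
 * that value before copying in a frame leaves the payload that follows the
 * Ethernet header at the remainder requested by the device.
 */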
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 def_rx, def_tx;
/* be conservative when selecting initial buffer size to
* increase the number of hosts this will work for
*/
def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
/* clamp rx_max and tx_max and inform device */
cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
/* sanitize the modulus and remainder values */
cdc_ncm_fix_modulus(dev);
/* set max datagram size */
cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
return 0;
}
static void
cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
struct usb_host_endpoint *e, *in = NULL, *out = NULL;
u8 ep;
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
e = intf->cur_altsetting->endpoint + ep;
/* ignore endpoints which cannot transfer data */
if (!usb_endpoint_maxp(&e->desc))
continue;
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(&e->desc)) {
if (!dev->status)
dev->status = e;
}
break;
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(&e->desc)) {
if (!in)
in = e;
} else {
if (!out)
out = e;
}
break;
default:
break;
}
}
if (in && !dev->in)
dev->in = usb_rcvbulkpipe(dev->udev,
in->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
if (out && !dev->out)
dev->out = usb_sndbulkpipe(dev->udev,
out->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
}
static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
{
if (ctx == NULL)
return;
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
ctx->tx_rem_skb = NULL;
}
if (ctx->tx_curr_skb != NULL) {
dev_kfree_skb_any(ctx->tx_curr_skb);
ctx->tx_curr_skb = NULL;
}
if (ctx->is_ndp16)
kfree(ctx->delayed_ndp16);
else
kfree(ctx->delayed_ndp32);
kfree(ctx);
}
/* we need to override the usbnet change_mtu ndo for two reasons:
* - respect the negotiated maximum datagram size
* - avoid unwanted changes to rx and tx buffers
*/
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
WRITE_ONCE(net->mtu, new_mtu);
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
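/* For illustration: an MTU change from userspace (e.g. "ip link set dev
 * <iface> mtu 1400", interface name and value illustrative) ends up in this
 * ndo, which renegotiates the max datagram size with the device while
 * leaving the rx and tx NTB buffer sizes untouched.
 */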
static const struct net_device_ops cdc_ncm_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
u8 *buf;
int len;
int temp;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
/* store ctx pointer in device data field */
dev->data[0] = (unsigned long)ctx;
/* only the control interface can be successfully probed */
ctx->control = intf;
/* get some pointers */
driver = driver_of(intf);
buf = intf->cur_altsetting->extra;
len = intf->cur_altsetting->extralen;
/* parse through descriptors associated with control interface */
cdc_parse_cdc_header(&hdr, intf, buf, len);
if (hdr.usb_cdc_union_desc)
ctx->data = usb_ifnum_to_if(dev->udev,
hdr.usb_cdc_union_desc->bSlaveInterface0);
ctx->ether_desc = hdr.usb_cdc_ether_desc;
ctx->func_desc = hdr.usb_cdc_ncm_desc;
ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
/* some buggy devices have an IAD but no CDC Union */
if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
}
/* check if we got everything */
if (!ctx->data) {
dev_err(&intf->dev, "CDC Union missing and no IAD found\n");
goto error;
}
if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
if (!ctx->mbim_desc) {
dev_err(&intf->dev, "MBIM functional descriptor missing\n");
goto error;
}
} else {
if (!ctx->ether_desc || !ctx->func_desc) {
dev_err(&intf->dev, "NCM or ECM functional descriptors missing\n");
goto error;
}
}
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
if (temp) {
dev_err(&intf->dev, "failed to claim data intf\n");
goto error;
}
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* Device-specific flags */
ctx->drvflags = drvflags;
/* Reset data interface. Some devices will not reset properly
* unless they are configured first. Toggle the altsetting to
* force a reset.
* Some other devices do not work properly with this procedure
* that can be avoided using quirk CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE
*/
if (!(ctx->drvflags & CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE))
usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
}
/* initialize basic device settings */
if (cdc_ncm_init(dev))
goto error2;
/* Some firmwares need a pause here or they will silently fail
* to set up the interface properly. This value was decided
* empirically on a Sierra Wireless MC7455 running 02.08.02.00
* firmware.
*/
usleep_range(10000, 20000);
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
}
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out ||
(!dev->status && dev->driver_info->flags & FLAG_LINK_INTR)) {
dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
}
usb_set_intfdata(ctx->control, dev);
if (ctx->ether_desc) {
temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
if (temp) {
dev_err(&intf->dev, "failed to get mac address\n");
goto error2;
}
dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->is_ndp16) {
ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
if (!ctx->delayed_ndp16)
goto error2;
} else {
ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
if (!ctx->delayed_ndp32)
goto error2;
}
dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
}
/* override ethtool_ops */
dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
/* add our sysfs attrs */
dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
/* must handle MTU changes */
dev->net->netdev_ops = &cdc_ncm_netdev_ops;
dev->net->max_mtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
return 0;
error2:
usb_set_intfdata(ctx->control, NULL);
usb_set_intfdata(ctx->data, NULL);
if (ctx->data != ctx->control)
usb_driver_release_interface(driver, ctx->data);
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
dev_info(&intf->dev, "bind() failure\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_driver *driver = driver_of(intf);
if (ctx == NULL)
return; /* no setup */
atomic_set(&ctx->stop, 1);
hrtimer_cancel(&ctx->tx_timer);
tasklet_kill(&ctx->bh);
/* handle devices with combined control and data interface */
if (ctx->control == ctx->data)
ctx->data = NULL;
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
usb_driver_release_interface(driver, ctx->data);
ctx->data = NULL;
} else if (intf == ctx->data && ctx->control) {
usb_set_intfdata(ctx->control, NULL);
usb_driver_release_interface(driver, ctx->control);
ctx->control = NULL;
}
usb_set_intfdata(intf, NULL);
cdc_ncm_free(ctx);
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
/* Return the number of the MBIM control interface altsetting iff it
* is preferred and available.
*/
u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
{
struct usb_host_interface *alt;
/* The MBIM spec defines a NCM compatible default altsetting,
* which we may have matched:
*
* "Functions that implement both NCM 1.0 and MBIM (an
* NCM/MBIM function) according to this recommendation
* shall provide two alternate settings for the
* Communication Interface. Alternate setting 0, and the
* associated class and endpoint descriptors, shall be
* constructed according to the rules given for the
* Communication Interface in section 5 of [USBNCM10].
* Alternate setting 1, and the associated class and
* endpoint descriptors, shall be constructed according to
* the rules given in section 6 (USB Device Model) of this
* specification."
*/
if (intf->num_altsetting < 2)
return intf->cur_altsetting->desc.bAlternateSetting;
if (prefer_mbim) {
alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
if (alt && cdc_ncm_comm_intf_is_mbim(alt))
return CDC_NCM_COMM_ALTSETTING_MBIM;
}
return CDC_NCM_COMM_ALTSETTING_NCM;
}
EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
/* MBIM backwards compatible function? */
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
/* The NCM data altsetting is fixed, so we hard-coded it.
* Additionally, generic NCM devices are assumed to accept arbitrarily
* placed NDP.
*/
return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
}
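/* Pad the skb tail with zeros so that skb->len lands on the requested
 * modulus/remainder boundary without exceeding max.  Example (illustrative
 * numbers): skb->len = 61, modulus = 4, remainder = 0 gives
 * ALIGN(61, 4) - 61 + 0 = 3 bytes of zero padding, provided there is enough
 * tailroom and 61 + 3 <= max.
 */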
static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
{
size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
if (skb->len + align > max)
align = max - skb->len;
if (align && skb_tailroom(skb) >= align)
skb_put_zero(skb, align);
}
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
* allocating a new one within skb
*/
static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
/* If NDP should be moved to the end of the NCM package, we can't follow the
* NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
* the wNdpIndex field in the header is not yet consistent with reality; it will be filled in later.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->delayed_ndp16->dwSignature == sign)
return ctx->delayed_ndp16;
/* We can only push a single NDP to the end. Return
* NULL to send what we've already got and queue this
* skb for later.
*/
else if (ctx->delayed_ndp16->dwSignature)
return NULL;
}
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
if (ndp16->dwSignature == sign)
return ndp16;
ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
}
/* align new NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
/* verify that there is room for the NDP and the datagram (reserve) */
if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
return NULL;
/* link to it */
if (ndp16)
ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
else
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
ndp16 = skb_put_zero(skb, ctx->max_ndp_size);
else
ndp16 = ctx->delayed_ndp16;
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
}
static struct usb_cdc_ncm_ndp32 *cdc_ncm_ndp32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp32 *ndp32 = NULL;
struct usb_cdc_ncm_nth32 *nth32 = (void *)skb->data;
size_t ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
/* If NDP should be moved to the end of the NCM package, we can't follow the
* NTH32 header as we would normally do. NDP isn't written to the SKB yet, and
* the dwNdpIndex field in the header is not yet consistent with reality; it will be filled in later.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->delayed_ndp32->dwSignature == sign)
return ctx->delayed_ndp32;
/* We can only push a single NDP to the end. Return
* NULL to send what we've already got and queue this
* skb for later.
*/
else if (ctx->delayed_ndp32->dwSignature)
return NULL;
}
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb->data + ndpoffset);
if (ndp32->dwSignature == sign)
return ndp32;
ndpoffset = le32_to_cpu(ndp32->dwNextNdpIndex);
}
/* align new NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
/* verify that there is room for the NDP and the datagram (reserve) */
if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
return NULL;
/* link to it */
if (ndp32)
ndp32->dwNextNdpIndex = cpu_to_le32(skb->len);
else
nth32->dwNdpIndex = cpu_to_le32(skb->len);
/* push a new empty NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
ndp32 = skb_put_zero(skb, ctx->max_ndp_size);
else
ndp32 = ctx->delayed_ndp32;
ndp32->dwSignature = sign;
ndp32->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp32) + sizeof(struct usb_cdc_ncm_dpe32));
return ndp32;
}
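/* Aggregate one or more datagrams into a single NTB for transmission.  The
 * resulting buffer starts with an NTH16/NTH32 header, followed by the
 * (aligned) datagrams, and carries one or more NDPs whose datagram pointer
 * entries record the offset and length of each datagram.  With
 * CDC_NCM_FLAG_NDP_TO_END the NDP is built in ctx->delayed_ndp16/32 and only
 * appended once the rest of the NTB is complete.
 */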
struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
union {
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_nth32 *nth32;
} nth;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
} ndp;
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
u32 delayed_ndp_size;
size_t padding_count;
/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
 * accordingly. Otherwise, we should check here.
 * When the NDP is delayed to the end of the NTB, reserve room for it plus
 * the worst-case padding that cdc_ncm_align_tail() may still add (up to
 * tx_modulus + tx_remainder - 1 bytes), so the finished NTB cannot overrun
 * tx_curr_size.
 */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
delayed_ndp_size = ctx->max_ndp_size +
max_t(u32,
ctx->tx_ndp_modulus,
ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
/* if there is a remaining skb, it gets priority */
if (skb != NULL) {
swap(skb, ctx->tx_rem_skb);
swap(sign, ctx->tx_rem_sign);
} else {
ready2send = 1;
}
/* check if we are resuming an OUT skb */
skb_out = ctx->tx_curr_skb;
/* allocate a new OUT skb */
if (!skb_out) {
if (ctx->tx_low_mem_val == 0) {
ctx->tx_curr_size = ctx->tx_max;
skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
			/* If the memory allocation fails, wait longer each
			 * time before attempting another full-size
			 * allocation, so as not to overload the system
			 * further.
			 */
if (skb_out == NULL) {
/* If even the smallest allocation fails, abort. */
if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
goto alloc_failed;
ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
(unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
}
}
if (skb_out == NULL) {
/* See if a very small allocation is possible.
* We will send this packet immediately and hope
* that there is more memory available later.
*/
if (skb)
ctx->tx_curr_size = max(skb->len,
(u32)USB_CDC_NCM_NTB_MIN_OUT_SIZE);
else
ctx->tx_curr_size = USB_CDC_NCM_NTB_MIN_OUT_SIZE;
skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
/* No allocation possible so we will abort */
if (!skb_out)
goto alloc_failed;
ctx->tx_low_mem_val--;
}
if (ctx->is_ndp16) {
/* fill out the initial 16-bit NTB header */
nth.nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
nth.nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
nth.nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
nth.nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
} else {
/* fill out the initial 32-bit NTB header */
nth.nth32 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth32));
nth.nth32->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH32_SIGN);
nth.nth32->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth32));
nth.nth32->wSequence = cpu_to_le16(ctx->tx_seq++);
}
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
		/* reset the payload counter for this skb_out */
ctx->tx_curr_frame_payload = 0;
}
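	/* Pack as many datagrams as possible into this NTB: each pass takes
	 * the caller's skb or a stored remainder, reserves a DPE entry in the
	 * NDP, aligns the datagram start and copies the payload, until the
	 * NTB or NDP is full or tx_max_datagrams is reached.
	 */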
for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
/* send any remaining skb first */
if (skb == NULL) {
skb = ctx->tx_rem_skb;
sign = ctx->tx_rem_sign;
ctx->tx_rem_skb = NULL;
/* check for end of skb */
if (skb == NULL)
break;
}
/* get the appropriate NDP for this skb */
if (ctx->is_ndp16)
ndp.ndp16 = cdc_ncm_ndp16(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
else
ndp.ndp32 = cdc_ncm_ndp32(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
/* align beginning of next frame */
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_curr_size);
/* check if we had enough room left for both NDP and frame */
if ((ctx->is_ndp16 && !ndp.ndp16) || (!ctx->is_ndp16 && !ndp.ndp32) ||
skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
skb = NULL;
dev->net->stats.tx_dropped++;
} else {
/* no room for skb - store for later */
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
dev->net->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
ctx->tx_rem_sign = sign;
skb = NULL;
ready2send = 1;
ctx->tx_reason_ntb_full++; /* count reason for transmitting */
}
break;
}
/* calculate frame number within this NDP */
if (ctx->is_ndp16) {
ndplen = le16_to_cpu(ndp.ndp16->wLength);
index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
/* OK, add this skb */
ndp.ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
ndp.ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
ndp.ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
} else {
ndplen = le16_to_cpu(ndp.ndp32->wLength);
index = (ndplen - sizeof(struct usb_cdc_ncm_ndp32)) / sizeof(struct usb_cdc_ncm_dpe32) - 1;
ndp.ndp32->dpe32[index].dwDatagramLength = cpu_to_le32(skb->len);
ndp.ndp32->dpe32[index].dwDatagramIndex = cpu_to_le32(skb_out->len);
ndp.ndp32->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe32));
}
skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
skb = NULL;
/* send now if this NDP is full */
if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
ready2send = 1;
ctx->tx_reason_ndp_full++; /* count reason for transmitting */
break;
}
}
/* free up any dangling skb */
if (skb != NULL) {
dev_kfree_skb_any(skb);
skb = NULL;
dev->net->stats.tx_dropped++;
}
ctx->tx_curr_frame_num = n;
if (n == 0) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
goto exit_no_skb;
} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
/* set the pending count */
if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
goto exit_no_skb;
} else {
if (n == ctx->tx_max_datagrams)
ctx->tx_reason_max_datagram++; /* count reason for transmitting */
/* frame goes out */
/* variables will be reset at next call */
}
	/* If requested, append the NDP (staged so far in ctx->delayed_ndp16/32)
	 * at the end of the frame and point the NTH at its final offset.
	 */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->is_ndp16) {
nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
nth.nth16->wNdpIndex = cpu_to_le16(skb_out->len);
skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
/* Zero out delayed NDP - signature checking will naturally fail. */
ndp.ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
} else {
nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
nth.nth32->dwNdpIndex = cpu_to_le32(skb_out->len);
skb_put_data(skb_out, ctx->delayed_ndp32, ctx->max_ndp_size);
ndp.ndp32 = memset(ctx->delayed_ndp32, 0, ctx->max_ndp_size);
}
}
	/* If the collected data size is less than or equal to ctx->min_tx_pkt
	 * bytes, we send the buffer as is.  If we get more data, it is more
	 * efficient for a USB HS mobile device with a DMA engine to receive a
	 * full-size NTB than to cancel the DMA transfer and receive a short
	 * packet.
	 *
	 * This optimization is pointless if we end up sending a ZLP after
	 * full-sized NTBs.
	 */
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
		/* delayed_ndp_size already accounts for worst-case alignment
		 * overhead, but be defensive: if the appended NDP still ran
		 * past tx_curr_size, padding_count would wrap to a huge
		 * unsigned value.  WARN and skip the memset instead of
		 * zero-filling far past the end of the buffer.
		 */
if (!WARN_ON(padding_count > ctx->tx_curr_size))
skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
}
/* set final frame length */
if (ctx->is_ndp16) {
nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
nth.nth16->wBlockLength = cpu_to_le16(skb_out->len);
} else {
nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
nth.nth32->dwBlockLength = cpu_to_le32(skb_out->len);
}
/* return skb */
ctx->tx_curr_skb = NULL;
/* keep private stats: framing overhead and number of NTBs */
ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
ctx->tx_ntbs++;
	/* usbnet will count all the framing overhead by default.
	 * Adjust the stats so that the tx_bytes counter shows real
	 * payload data instead.
	 */
usbnet_set_skb_tx_stats(skb_out, n,
(long)ctx->tx_curr_frame_payload - skb_out->len);
return skb_out;
alloc_failed:
if (skb) {
dev_kfree_skb_any(skb);
dev->net->stats.tx_dropped++;
}
exit_no_skb:
/* Start timer, if there is a remaining non-empty skb */
if (ctx->tx_curr_skb != NULL && n > 0)
cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame);
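/* TX aggregation timer: a partially filled NTB is parked in ctx->tx_curr_skb
 * and a short hrtimer is (re)armed; when it finally fires, the tasklet below
 * calls usbnet_start_xmit() with a NULL skb so that cdc_ncm_fill_tx_frame()
 * hands out whatever has been collected.
 */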
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
{
/* start timer, if not already started */
if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
hrtimer_start(&ctx->tx_timer,
ctx->timer_interval,
HRTIMER_MODE_REL);
}
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
{
struct cdc_ncm_ctx *ctx =
container_of(timer, struct cdc_ncm_ctx, tx_timer);
if (!atomic_read(&ctx->stop))
tasklet_schedule(&ctx->bh);
return HRTIMER_NORESTART;
}
static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
{
struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
struct usbnet *dev = ctx->dev;
spin_lock(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
spin_unlock(&ctx->mtx);
} else if (dev->net != NULL) {
ctx->tx_reason_timeout++; /* count reason for transmitting */
spin_unlock(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
netif_tx_unlock_bh(dev->net);
} else {
spin_unlock(&ctx->mtx);
}
}
struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb_out;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
	/*
	 * The Ethernet API we are using does not support transmitting
	 * multiple Ethernet frames in a single call.  This driver will
	 * accumulate multiple Ethernet frames and send out a larger
	 * USB frame when the USB buffer is full or when a short
	 * aggregation timeout expires.
	 */
if (ctx == NULL)
goto error;
spin_lock_bh(&ctx->mtx);
if (ctx->is_ndp16)
skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
else
skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
error:
if (skb != NULL)
dev_kfree_skb_any(skb);
return NULL;
}
EXPORT_SYMBOL_GPL(cdc_ncm_tx_fixup);
/* verify NTB header and return offset of first NDP, or negative error */
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_nth16 *nth16;
int len;
int ret = -EINVAL;
if (ctx == NULL)
goto error;
if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
sizeof(struct usb_cdc_ncm_ndp16))) {
netif_dbg(dev, rx_err, dev->net, "frame too short\n");
goto error;
}
nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid NTH16 signature <%#010x>\n",
le32_to_cpu(nth16->dwSignature));
goto error;
}
len = le16_to_cpu(nth16->wBlockLength);
if (len > ctx->rx_max) {
netif_dbg(dev, rx_err, dev->net,
"unsupported NTB block length %u/%u\n", len,
ctx->rx_max);
goto error;
}
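	/* Tolerate a sequence number that advances by one, wraps cleanly from
	 * 0xffff to 0, or stays at 0 after (re)initialization; anything else
	 * is logged as a glitch, but the NTB is still accepted and rx_seq
	 * resynchronizes below.
	 */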
if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
(ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
!((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
netif_dbg(dev, rx_err, dev->net,
"sequence number glitch prev=%d curr=%d\n",
ctx->rx_seq, le16_to_cpu(nth16->wSequence));
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
ret = le16_to_cpu(nth16->wNdpIndex);
error:
return ret;
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_nth32 *nth32;
int len;
int ret = -EINVAL;
if (ctx == NULL)
goto error;
if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth32) +
sizeof(struct usb_cdc_ncm_ndp32))) {
netif_dbg(dev, rx_err, dev->net, "frame too short\n");
goto error;
}
nth32 = (struct usb_cdc_ncm_nth32 *)skb_in->data;
if (nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid NTH32 signature <%#010x>\n",
le32_to_cpu(nth32->dwSignature));
goto error;
}
len = le32_to_cpu(nth32->dwBlockLength);
if (len > ctx->rx_max) {
netif_dbg(dev, rx_err, dev->net,
"unsupported NTB block length %u/%u\n", len,
ctx->rx_max);
goto error;
}
if ((ctx->rx_seq + 1) != le16_to_cpu(nth32->wSequence) &&
(ctx->rx_seq || le16_to_cpu(nth32->wSequence)) &&
!((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth32->wSequence))) {
netif_dbg(dev, rx_err, dev->net,
"sequence number glitch prev=%d curr=%d\n",
ctx->rx_seq, le16_to_cpu(nth32->wSequence));
}
ctx->rx_seq = le16_to_cpu(nth32->wSequence);
ret = le32_to_cpu(nth32->dwNdpIndex);
error:
return ret;
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth32);
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
ndpoffset);
goto error;
}
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
le16_to_cpu(ndp16->wLength));
goto error;
}
ret = ((le16_to_cpu(ndp16->wLength) -
sizeof(struct usb_cdc_ncm_ndp16)) /
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
if ((sizeof(struct usb_cdc_ncm_ndp16) +
ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
error:
return ret;
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
{
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp32 *ndp32;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
ndpoffset);
goto error;
}
ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp32->wLength) < USB_CDC_NCM_NDP32_LENGTH_MIN) {
netif_dbg(dev, rx_err, dev->net, "invalid DPT32 length <%u>\n",
le16_to_cpu(ndp32->wLength));
goto error;
}
ret = ((le16_to_cpu(ndp32->wLength) -
sizeof(struct usb_cdc_ncm_ndp32)) /
sizeof(struct usb_cdc_ncm_dpe32));
ret--; /* we process NDP entries except for the last one */
if ((sizeof(struct usb_cdc_ncm_ndp32) +
ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
error:
return ret;
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp32);
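/* Unbundle a received NTB: verify the NTH, then walk each NDP (bounded by
 * loopcount) and each of its DPEs, sanity-check every datagram's offset and
 * length, and pass a fresh copy of each datagram to usbnet_skb_return().
 */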
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
unsigned int len;
int nframes;
int x;
unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
} ndp;
union {
struct usb_cdc_ncm_dpe16 *dpe16;
struct usb_cdc_ncm_dpe32 *dpe32;
} dpe;
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
u32 payload = 0;
if (ctx->is_ndp16)
ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
else
ndpoffset = cdc_ncm_rx_verify_nth32(ctx, skb_in);
if (ndpoffset < 0)
goto error;
next_ndp:
if (ctx->is_ndp16) {
nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
if (nframes < 0)
goto error;
ndp.ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (ndp.ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid DPT16 signature <%#010x>\n",
le32_to_cpu(ndp.ndp16->dwSignature));
goto err_ndp;
}
dpe.dpe16 = ndp.ndp16->dpe16;
} else {
nframes = cdc_ncm_rx_verify_ndp32(skb_in, ndpoffset);
if (nframes < 0)
goto error;
ndp.ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
if (ndp.ndp32->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid DPT32 signature <%#010x>\n",
le32_to_cpu(ndp.ndp32->dwSignature));
goto err_ndp;
}
dpe.dpe32 = ndp.ndp32->dpe32;
}
for (x = 0; x < nframes; x++) {
if (ctx->is_ndp16) {
offset = le16_to_cpu(dpe.dpe16->wDatagramIndex);
len = le16_to_cpu(dpe.dpe16->wDatagramLength);
} else {
offset = le32_to_cpu(dpe.dpe32->dwDatagramIndex);
len = le32_to_cpu(dpe.dpe32->dwDatagramLength);
}
/*
* CDC NCM ch. 3.7
* All entries after first NULL entry are to be ignored
*/
if ((offset == 0) || (len == 0)) {
if (!x)
goto err_ndp; /* empty NTB */
break;
}
		/* sanity checking - watch out for integer wrap */
if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
x, offset, len, skb_in);
if (!x)
goto err_ndp;
break;
} else {
/* create a fresh copy to reduce truesize */
skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!skb)
goto error;
skb_put_data(skb, skb_in->data + offset, len);
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
if (ctx->is_ndp16)
dpe.dpe16++;
else
dpe.dpe32++;
}
err_ndp:
/* are there more NDPs to process? */
if (ctx->is_ndp16)
ndpoffset = le16_to_cpu(ndp.ndp16->wNextNdpIndex);
else
ndpoffset = le32_to_cpu(ndp.ndp32->dwNextNdpIndex);
if (ndpoffset && loopcount--)
goto next_ndp;
/* update stats */
ctx->rx_overhead += skb_in->len - payload;
ctx->rx_ntbs++;
return 1;
error:
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_fixup);
static void
cdc_ncm_speed_change(struct usbnet *dev,
struct usb_cdc_speed_change *data)
{
	/* RTL8156 devices shipped before 2021 send this notification about
	 * every 32 ms; just record the reported speeds.
	 */
dev->rx_speed = le32_to_cpu(data->DLBitRRate);
dev->tx_speed = le32_to_cpu(data->ULBitRate);
}
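/* Interrupt endpoint notifications: speed-change data may arrive split from
 * the notification header (EVENT_STS_SPLIT), and link state changes are only
 * forwarded to usbnet when the carrier state actually changes.
 */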
static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
if (urb->actual_length < sizeof(*event))
return;
/* test for split data in 8-byte chunks */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
cdc_ncm_speed_change(dev,
(struct usb_cdc_speed_change *)urb->transfer_buffer);
return;
}
event = urb->transfer_buffer;
switch (event->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		/*
		 * According to the CDC NCM specification ch. 7.1, the
		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
		 * sent by the device after USB_CDC_NOTIFY_SPEED_CHANGE.
		 */
		/* RTL8156 devices shipped before 2021 send a notification
		 * about every 32 ms.  Don't forward it if the link state is
		 * unchanged.
		 */
if (netif_carrier_ok(dev->net) != !!event->wValue)
usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
if (urb->actual_length < (sizeof(*event) +
sizeof(struct usb_cdc_speed_change)))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
cdc_ncm_speed_change(dev,
(struct usb_cdc_speed_change *)&event[1]);
break;
default:
dev_dbg(&dev->udev->dev,
"NCM: unexpected notification 0x%02x!\n",
event->bNotificationType);
break;
}
}
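/* With FLAG_ETHER | FLAG_POINTTOPOINT, usbnet names the interface eth%d when
 * the device's MAC address is globally unique (locally-administered bit
 * clear) and usb%d otherwise, matching cdc_ether/cdc_ecm behaviour.
 */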
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM (NO ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
static const struct driver_info cdc_ncm_zlp_info = {
.description = "CDC NCM (SEND ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
static const struct driver_info apple_tethering_interface_info = {
.description = "CDC NCM (Apple Tethering)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as apple_tethering_interface_info, but without FLAG_LINK_INTR */
static const struct driver_info apple_private_interface_info = {
.description = "CDC NCM (Apple Private)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_ETHER | FLAG_SEND_ZLP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_WWAN */
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as wwan_info, but with FLAG_NOARP */
static const struct driver_info wwan_noarp_info = {
.description = "Mobile Broadband Network Device (NO ARP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
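/* Quirky devices are matched first by VID/PID or vendor-specific entries;
 * the generic NCM class match at the end of the table catches everything
 * else.
 */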
static const struct usb_device_id cdc_devs[] = {
/* iPhone */
{ USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 2),
.driver_info = (unsigned long)&apple_tethering_interface_info,
},
{ USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 4),
.driver_info = (unsigned long)&apple_private_interface_info,
},
/* iPad */
{ USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 2),
.driver_info = (unsigned long)&apple_tethering_interface_info,
},
{ USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 4),
.driver_info = (unsigned long)&apple_private_interface_info,
},
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
.idVendor = 0x0bdb,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long) &wwan_info,
},
/* Telit LE910 V2 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},
/* DW5812 LTE Verizon Mobile Broadband Card
* Unlike DW5550 this device requires FLAG_NOARP
*/
{ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},
/* DW5813 LTE AT&T Mobile Broadband Card
* Unlike DW5550 this device requires FLAG_NOARP
*/
{ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},
/* Dell branded MBM devices like DW5550 */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
.idVendor = 0x413c,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long) &wwan_info,
},
/* Toshiba branded MBM devices */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
.idVendor = 0x0930,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long) &wwan_info,
},
/* tag Huawei devices as wwan */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
},
	/* Infineon (now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},
/* u-blox TOBY-L4 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
},
/* DisplayLink docking stations */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
.idVendor = 0x17e9,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long)&cdc_ncm_zlp_info,
},
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_ncm_info,
},
{
},
};
MODULE_DEVICE_TABLE(usb, cdc_devs);
static struct usb_driver cdc_ncm_driver = {
.name = "cdc_ncm",
.id_table = cdc_devs,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
.supports_autosuspend = 1,
	/* Hub-initiated LPM is not suitable for communications devices,
	 * which know best when their link can enter a lower power state.
	 */
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(cdc_ncm_driver);
MODULE_AUTHOR("Hans Petter Selasky");
MODULE_DESCRIPTION("USB CDC NCM host driver");
MODULE_LICENSE("Dual BSD/GPL");