Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix list tests in netfilter ingress support, from Florian Westphal.

 2) Fix reversal of input and output interfaces in ingress hook invocation, from Pablo Neira Ayuso.

 3) We have a use after free in r8169, caught by Dave Jones, fixed by Francois Romieu.

 4) Splice use-after-free fix in AF_UNIX from Hannes Frederic Sowa.

 5) Three ipv6 route handling bug fixes from Martin KaFai Lau:
    a) Don't create clone routes not managed by the fib6 tree
    b) Don't forget to check expiration of DST_NOCACHE routes.
    c) Handle rt->dst.from == NULL properly.

 6) Several AF_PACKET fixes wrt transport header setting and SKB protocol setting, from Daniel Borkmann.

 7) Fix thunder driver crash on shutdown, from Pavel Fedin.

 8) Several Mellanox driver fixes (max MTU calculations, use of correct DMA unmap in TX path, etc.) from Saeed Mahameed, Tariq Toukan, Doron Tsur, Achiad Shochat, Eran Ben Elisha, and Noa Osherovich.

 9) Several mv88e6060 DSA driver fixes (wrong bit definitions for certain registers, etc.) from Neil Armstrong.

10) Make sure to disable preemption while updating per-cpu stats of ip tunnels, from Jason A. Donenfeld.

11) Various ARM64 bpf JIT fixes, from Yang Shi.

12) Flush icache properly in ARM JITs, from Daniel Borkmann.

13) Fix masking of RX and TX interrupts in ravb driver, from Masaru Nagai.

14) Fix netdev feature propagation for devices not implementing ->ndo_set_features(). From Nikolay Aleksandrov.

15) Big endian fix in vmxnet3 driver, from Shrikrishna Khare.

16) RAW socket code increments incorrect SNMP counters, fix from Ben Cartwright-Cox.

17) IPv6 multicast SNMP counters are bumped twice, fix from Neil Horman.

18) Fix handling of VLAN headers on stacked devices when REORDER is disabled. From Vlad Yasevich.

19) Fix SKB leaks and use-after-free in ipvlan and macvlan drivers, from Sabrina Dubroca.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (83 commits)
  MAINTAINERS: Update Mellanox's Eth NIC driver entries
  net/core: revert "net: fix __netdev_update_features return.." and add comment
  af_unix: take receive queue lock while appending new skb
  rtnetlink: fix frame size warning in rtnl_fill_ifinfo
  net: use skb_clone to avoid alloc_pages failure.
  packet: Use PAGE_ALIGNED macro
  packet: Don't check frames_per_block against negative values
  net: phy: Use interrupts when available in NOLINK state
  phy: marvell: Add support for 88E1540 PHY
  arm64: bpf: make BPF prologue and epilogue align with ARM64 AAPCS
  macvlan: fix leak in macvlan_handle_frame
  ipvlan: fix use after free of skb
  ipvlan: fix leak in ipvlan_rcv_frame
  vlan: Do not put vlan headers back on bridge and macvlan ports
  vlan: Fix untag operations of stacked vlans with REORDER_HEADER off
  via-velocity: unconditionally drop frames with bad l2 length
  ipg: Remove ipg driver
  dl2k: Add support for IP1000A-based cards
  snmp: Remove duplicate OUTMCAST stat increment
  net: thunder: Check for driver data in nicvf_remove()
  ...
commit 7f151f1d8a
@@ -5711,13 +5711,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
F: net/ipv4/netfilter/ipt_MASQUERADE.c

IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
M: Sorbica Shieh <sorbica@icplus.com.tw>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/icplus/ipg.*

IPATH DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
L: linux-rdma@vger.kernel.org
@@ -6923,13 +6916,21 @@ F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/

MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
M: Eugenia Emantayev <eugenia@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx4/en_*

MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*

MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
}
build_epilogue(&ctx);

flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
if (ctx.imm_count)
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
[BPF_REG_8] = A64_R(21),
[BPF_REG_9] = A64_R(22),
/* read-only frame pointer to access stack */
[BPF_REG_FP] = A64_FP,
[BPF_REG_FP] = A64_R(25),
/* temporary register for internal BPF JIT */
[TMP_REG_1] = A64_R(23),
[TMP_REG_2] = A64_R(24),
@@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx)
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);

/*
* BPF prog stack layout
*
* high
* original A64_SP => 0:+-----+ BPF prologue
* |FP/LR|
* current A64_FP => -16:+-----+
* | ... | callee saved registers
* +-----+
* | | x25/x26
* BPF fp register => -80:+-----+
* | |
* | ... | BPF prog stack
* | |
* | |
* current A64_SP => +-----+
* | |
* | ... | Function call stack
* | |
* +-----+
* low
*
*/

/* Save FP and LR registers to stay align with ARM64 AAPCS */
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);

/* Save callee-saved register */
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
if (ctx->tmp_used)
emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

/* Set up BPF stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
/* Save fp (x25) and x26. SP requires 16 bytes alignment */
emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);

/* Set up frame pointer */
/* Set up BPF prog stack base register (x25) */
emit(A64_MOV(1, fp, A64_SP), ctx);

/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

/* Clear registers A and X */
emit_a64_mov_i64(ra, 0, ctx);
emit_a64_mov_i64(rx, 0, ctx);
@@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx)
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

/* Restore callee-saved register */
if (ctx->tmp_used)
emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
emit(A64_POP(r8, r9, A64_SP), ctx);
emit(A64_POP(r6, r7, A64_SP), ctx);

/* Restore frame pointer */
emit(A64_MOV(1, fp, A64_SP), ctx);
/* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

/* Set return value */
emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, image_size, 2, ctx.image);

bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
bpf_flush_icache(header, ctx.image + ctx.idx);

set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)ctx.image;
@@ -15,9 +15,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>

#define REG_PORT(p) (8 + (p))
#define REG_GLOBAL 0x0f
#include "mv88e6060.h"

static int reg_read(struct dsa_switch *ds, int addr, int reg)
{
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;

ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
if (ret == 0x0600)
if (ret == PORT_SWITCH_ID_6060)
return "Marvell 88E6060 (A0)";
if (ret == 0x0601 || ret == 0x0602)
if (ret == PORT_SWITCH_ID_6060_R1 ||
ret == PORT_SWITCH_ID_6060_R2)
return "Marvell 88E6060 (B0)";
if ((ret & 0xfff0) == 0x0600)
if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
return "Marvell 88E6060";
}

@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
unsigned long timeout;

/* Set all ports to the disabled state. */
for (i = 0; i < 6; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
for (i = 0; i < MV88E6060_PORTS; i++) {
ret = REG_READ(REG_PORT(i), PORT_CONTROL);
REG_WRITE(REG_PORT(i), PORT_CONTROL,
ret & ~PORT_CONTROL_STATE_MASK);
}

/* Wait for transmit queues to drain. */
usleep_range(2000, 4000);

/* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_SWRESET |
GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);

/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0x8000) == 0x0000)
ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
if (ret & GLOBAL_STATUS_INIT_READY)
break;

usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);

/* Enable automatic address learning, set the address
* database size to 1024 entries, and set the default aging
* time to 5 minutes.
*/
REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);

return 0;
}
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
REG_WRITE(addr, PORT_CONTROL,
dsa_is_cpu_port(ds, p) ?
PORT_CONTROL_TRAILER |
PORT_CONTROL_INGRESS_MODE |
PORT_CONTROL_STATE_FORWARDING :
PORT_CONTROL_STATE_FORWARDING);

/* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
*/
REG_WRITE(addr, 0x06,
((p & 0xf) << 12) |
(dsa_is_cpu_port(ds, p) ?
ds->phys_port_mask :
(1 << ds->dst->cpu_port)));
REG_WRITE(addr, PORT_VLAN_MAP,
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(ds, p) ?
ds->phys_port_mask :
BIT(ds->dst->cpu_port)));

/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
REG_WRITE(addr, 0x0b, 1 << p);
REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));

return 0;
}
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;

for (i = 0; i < 6; i++) {
for (i = 0; i < MV88E6060_PORTS; i++) {
ret = mv88e6060_setup_port(ds, i);
if (ret < 0)
return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)

static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
{
REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
/* Use the same MAC Address as FD Pause frames for all ports */
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

return 0;
}

static int mv88e6060_port_to_phy_addr(int port)
{
if (port >= 0 && port <= 5)
if (port >= 0 && port < MV88E6060_PORTS)
return port;
return -1;
}
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(ds, addr, regnum, val);
}

static void mv88e6060_poll_link(struct dsa_switch *ds)
{
int i;

for (i = 0; i < DSA_MAX_PORTS; i++) {
struct net_device *dev;
int uninitialized_var(port_status);
int link;
int speed;
int duplex;
int fc;

dev = ds->ports[i];
if (dev == NULL)
continue;

link = 0;
if (dev->flags & IFF_UP) {
port_status = reg_read(ds, REG_PORT(i), 0x00);
if (port_status < 0)
continue;

link = !!(port_status & 0x1000);
}

if (!link) {
if (netif_carrier_ok(dev)) {
netdev_info(dev, "link down\n");
netif_carrier_off(dev);
}
continue;
}

speed = (port_status & 0x0100) ? 100 : 10;
duplex = (port_status & 0x0200) ? 1 : 0;
fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;

if (!netif_carrier_ok(dev)) {
netdev_info(dev,
"link up, %d Mb/s, %s duplex, flow control %sabled\n",
speed,
duplex ? "full" : "half",
fc ? "en" : "dis");
netif_carrier_on(dev);
}
}
}

static struct dsa_switch_driver mv88e6060_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_TRAILER,
.probe = mv88e6060_probe,
@@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
.set_addr = mv88e6060_set_addr,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
.poll_link = mv88e6060_poll_link,
};

static int __init mv88e6060_init(void)
drivers/net/dsa/mv88e6060.h (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
* drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
* Copyright (c) 2015 Neil Armstrong
*
* Based on mv88e6xxx.h
* Copyright (c) 2008 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/

#ifndef __MV88E6060_H
#define __MV88E6060_H

#define MV88E6060_PORTS 6

#define REG_PORT(p) (0x8 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
#define PORT_STATUS_MY_PAUSE BIT(14)
#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
#define PORT_STATUS_RESOLVED BIT(13)
#define PORT_STATUS_LINK BIT(12)
#define PORT_STATUS_PORTMODE BIT(11)
#define PORT_STATUS_PHYMODE BIT(10)
#define PORT_STATUS_DUPLEX BIT(9)
#define PORT_STATUS_SPEED BIT(8)
#define PORT_SWITCH_ID 0x03
#define PORT_SWITCH_ID_6060 0x0600
#define PORT_SWITCH_ID_6060_MASK 0xfff0
#define PORT_SWITCH_ID_6060_R1 0x0601
#define PORT_SWITCH_ID_6060_R2 0x0602
#define PORT_CONTROL 0x04
#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
#define PORT_CONTROL_TRAILER BIT(14)
#define PORT_CONTROL_HEADER BIT(11)
#define PORT_CONTROL_INGRESS_MODE BIT(8)
#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
#define PORT_CONTROL_STATE_LEARNING 0x02
#define PORT_CONTROL_STATE_FORWARDING 0x03
#define PORT_VLAN_MAP 0x06
#define PORT_VLAN_MAP_DBNUM_SHIFT 12
#define PORT_VLAN_MAP_TABLE_MASK 0x1f
#define PORT_ASSOC_VECTOR 0x0b
#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
#define PORT_RX_CNTR 0x10
#define PORT_TX_CNTR 0x11

#define REG_GLOBAL 0x0f
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
#define GLOBAL_STATUS_INIT_READY BIT(11)
#define GLOBAL_STATUS_ATU_FULL BIT(3)
#define GLOBAL_STATUS_ATU_DONE BIT(2)
#define GLOBAL_STATUS_PHY_INT BIT(1)
#define GLOBAL_STATUS_EEINT BIT(0)
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
#define GLOBAL_CONTROL 0x04
#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
#define GLOBAL_CONTROL_CTRMODE BIT(8)
#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
#define GLOBAL_ATU_CONTROL 0x0a
#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
#define GLOBAL_ATU_OP 0x0b
#define GLOBAL_ATU_OP_BUSY BIT(15)
#define GLOBAL_ATU_OP_NOP (0 << 12)
#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_DATA 0x0c
#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
#define GLOBAL_ATU_MAC_01 0x0d
#define GLOBAL_ATU_MAC_23 0x0e
#define GLOBAL_ATU_MAC_45 0x0f

#endif
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
source "drivers/net/ethernet/icplus/Kconfig"

config JME
tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_IP1000) += icplus/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,

/* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
if (CHIP_IS_E1x(bp))
if (chip_is_e1x)
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static struct pci_error_handlers liquidio_err_handler = {
static const struct pci_error_handlers liquidio_err_handler = {
.error_detected = liquidio_pcie_error_detected,
.mmio_enabled = liquidio_pcie_mmio_enabled,
.slot_reset = liquidio_pcie_slot_reset,
@@ -1583,8 +1583,14 @@ err_disable_device:
static void nicvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nicvf *nic = netdev_priv(netdev);
struct net_device *pnetdev = nic->pnicvf->netdev;
struct nicvf *nic;
struct net_device *pnetdev;

if (!netdev)
return;

nic = netdev_priv(netdev);
pnetdev = nic->pnicvf->netdev;

/* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
if NET_VENDOR_DLINK

config DL2K
tristate "DL2000/TC902x-based Gigabit Ethernet support"
tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
depends on PCI
select CRC32
---help---
This driver supports DL2000/TC902x-based Gigabit ethernet cards,
This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
which includes
D-Link DGE-550T Gigabit Ethernet Adapter.
D-Link DL2000-based Gigabit Ethernet Adapter.
Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
ICPlus IP1000A-based cards

To compile this driver as a module, choose M here: the
module will be called dl2k.
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_unmap_rx;

if (np->chip_id == CHIP_IP1000A &&
(np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
/* PHY magic taken from ipg driver, undocumented registers */
mii_write(dev, np->phy_addr, 31, 0x0001);
mii_write(dev, np->phy_addr, 27, 0x01e0);
mii_write(dev, np->phy_addr, 31, 0x0002);
mii_write(dev, np->phy_addr, 27, 0xeb8e);
mii_write(dev, np->phy_addr, 31, 0x0000);
mii_write(dev, np->phy_addr, 30, 0x005e);
/* advertise 1000BASE-T half & full duplex, prefer MASTER */
mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
}

/* Fiber device? */
np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
for (i = 0; i < 6; i++)
dev->dev_addr[i] = psrom->mac_addr[i];

if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
return 0;
}

if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
return 0;
}
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
return 0;
}

static void rio_set_led_mode(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
u32 mode;

if (np->chip_id != CHIP_IP1000A)
return;

mode = dr32(ASICCtrl);
mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

if (np->led_mode & 0x01)
mode |= IPG_AC_LED_MODE;
if (np->led_mode & 0x02)
mode |= IPG_AC_LED_MODE_BIT_1;
if (np->led_mode & 0x08)
mode |= IPG_AC_LED_SPEED;

dw32(ASICCtrl, mode);
}

static int
rio_open (struct net_device *dev)
{
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
mdelay(10);

rio_set_led_mode(dev);

/* DebugCtrl bit 4, 5, 9 must set */
dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)

alloc_list (dev);

/* Get station address */
for (i = 0; i < 6; i++)
dw8(StationAddr0 + i, dev->dev_addr[i]);
/* Set station address */
/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
* too. However, it doesn't work on IP1000A so we use 16-bit access.
*/
for (i = 0; i < 3; i++)
dw16(StationAddr0 + 2 * i,
cpu_to_le16(((u16 *)dev->dev_addr)[i]));

set_multicast (dev);
if (np->coalesce) {
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
break;
mdelay (1);
}
rio_set_led_mode(dev);
rio_free_tx (dev, 1);
/* Reset TFDListPtr */
dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
break;
mdelay (1);
}
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
dev->name, int_status);
dw16(ASICCtrl + 2, GlobalReset | HostReset);
mdelay (500);
rio_set_led_mode(dev);
}
}

@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
ResetBusy = 0x0400,
};

#define IPG_AC_LED_MODE BIT(14)
#define IPG_AC_LED_SPEED BIT(27)
#define IPG_AC_LED_MODE_BIT_1 BIT(29)

/* Transmit Frame Control bits */
enum TFC_bits {
DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
u16 asic_ctrl; /* 0x02 */
u16 sub_vendor_id; /* 0x04 */
u16 sub_system_id; /* 0x06 */
u16 reserved1[12]; /* 0x08-0x1f */
u16 pci_base_1; /* 0x08 (IP1000A only) */
u16 pci_base_2; /* 0x0a (IP1000A only) */
u16 led_mode; /* 0x0c (IP1000A only) */
u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
u16 advertising; /* NWay media advertisement */
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
};

/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
class_mask of the class are honored during the comparison.
driver_data Data private to the driver.
*/
#define CHIP_IP1000A 1

static const struct pci_device_id rio_pci_tbl[] = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
{ PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
{ PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
{ PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
{ }
};
MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
int status;
u32 rss_flags = adapter->rss_info.rss_flags;

if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}

if (rss_flags == adapter->rss_info.rss_flags)
return status;

if (be_multi_rxq(adapter)) {
for (j = 0; j < 128; j += adapter->num_rss_qs) {
for_all_rss_queues(adapter, rxo, i) {
if ((j + i) >= 128)
break;
rsstable[j + i] = rxo->rss_id;
}
}
}
return 0;

status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
rss_flags, 128, adapter->rss_info.rss_hkey);
rss_flags, RSS_INDIR_TABLE_LEN,
adapter->rss_info.rss_hkey);
if (!status)
adapter->rss_info.rss_flags = rss_flags;

@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)

netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
128, rss_key);
RSS_INDIR_TABLE_LEN, rss_key);
if (rc) {
rss->rss_flags = RSS_ENABLE_NONE;
return rc;
@@ -1,13 +0,0 @@
#
# IC Plus device configuration
#

config IP1000
tristate "IP1000 Gigabit Ethernet support"
depends on PCI
select MII
---help---
This driver supports IP1000 gigabit Ethernet cards.

To compile this driver as a module, choose M here: the module
will be called ipg. This is recommended.
@@ -1,5 +0,0 @@
#
# Makefile for the IC Plus device drivers
#

obj-$(CONFIG_IP1000) += ipg.o
File diff suppressed because it is too large
@ -1,748 +0,0 @@
|
||||
/*
|
||||
* Include file for Gigabit Ethernet device driver for Network
|
||||
* Interface Cards (NICs) utilizing the Tamarack Microelectronics
|
||||
* Inc. IPG Gigabit or Triple Speed Ethernet Media Access
|
||||
* Controller.
|
||||
*/
|
||||
#ifndef __LINUX_IPG_H
|
||||
#define __LINUX_IPG_H
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/errno.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <asm/bitops.h>
|
||||
|
||||
/*
|
||||
* Constants
|
||||
*/
|
||||
|
||||
/* GMII based PHY IDs */
|
||||
#define NS 0x2000
|
||||
#define MARVELL 0x0141
|
||||
#define ICPLUS_PHY 0x243
|
||||
|
||||
/* NIC Physical Layer Device MII register fields. */
|
||||
#define MII_PHY_SELECTOR_IEEE8023 0x0001
|
||||
#define MII_PHY_TECHABILITYFIELD 0x1FE0
|
||||
|
||||
/* GMII_PHY_1000 need to set to prefer master */
|
||||
#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
|
||||
|
||||
/* NIC Physical Layer Device GMII constants. */
|
||||
#define GMII_PREAMBLE 0xFFFFFFFF
|
||||
#define GMII_ST 0x1
|
||||
#define GMII_READ 0x2
|
||||
#define GMII_WRITE 0x1
|
||||
#define GMII_TA_READ_MASK 0x1
|
||||
#define GMII_TA_WRITE 0x2
|
||||
|
||||
/* I/O register offsets. */
|
||||
enum ipg_regs {
|
||||
DMA_CTRL = 0x00,
|
||||
RX_DMA_STATUS = 0x08, /* Unused + reserved */
|
||||
TFD_LIST_PTR_0 = 0x10,
|
||||
TFD_LIST_PTR_1 = 0x14,
|
||||
TX_DMA_BURST_THRESH = 0x18,
|
||||
TX_DMA_URGENT_THRESH = 0x19,
|
||||
TX_DMA_POLL_PERIOD = 0x1a,
|
||||
RFD_LIST_PTR_0 = 0x1c,
|
||||
RFD_LIST_PTR_1 = 0x20,
|
||||
RX_DMA_BURST_THRESH = 0x24,
|
||||
RX_DMA_URGENT_THRESH = 0x25,
|
||||
RX_DMA_POLL_PERIOD = 0x26,
|
||||
DEBUG_CTRL = 0x2c,
|
||||
ASIC_CTRL = 0x30,
|
||||
FIFO_CTRL = 0x38, /* Unused */
|
||||
FLOW_OFF_THRESH = 0x3c,
|
||||
FLOW_ON_THRESH = 0x3e,
|
||||
EEPROM_DATA = 0x48,
|
||||
EEPROM_CTRL = 0x4a,
|
||||
EXPROM_ADDR = 0x4c, /* Unused */
|
||||
EXPROM_DATA = 0x50, /* Unused */
|
||||
WAKE_EVENT = 0x51, /* Unused */
|
||||
COUNTDOWN = 0x54, /* Unused */
|
||||
INT_STATUS_ACK = 0x5a,
|
||||
INT_ENABLE = 0x5c,
|
||||
INT_STATUS = 0x5e, /* Unused */
|
||||
TX_STATUS = 0x60,
|
||||
MAC_CTRL = 0x6c,
|
||||
VLAN_TAG = 0x70, /* Unused */
|
||||
PHY_SET = 0x75,
|
||||
PHY_CTRL = 0x76,
|
||||
STATION_ADDRESS_0 = 0x78,
|
||||
STATION_ADDRESS_1 = 0x7a,
|
||||
STATION_ADDRESS_2 = 0x7c,
|
||||
MAX_FRAME_SIZE = 0x86,
|
||||
RECEIVE_MODE = 0x88,
|
||||
HASHTABLE_0 = 0x8c,
|
||||
HASHTABLE_1 = 0x90,
|
||||
RMON_STATISTICS_MASK = 0x98,
|
||||
STATISTICS_MASK = 0x9c,
|
||||
RX_JUMBO_FRAMES = 0xbc, /* Unused */
|
||||
TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
|
||||
IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
|
||||
UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
|
||||
TX_JUMBO_FRAMES = 0xf4 /* Unused */
|
||||
};
|
||||
|
||||
/* Ethernet MIB statistic register offsets. */
|
||||
#define IPG_OCTETRCVOK 0xA8
|
||||
#define IPG_MCSTOCTETRCVDOK 0xAC
|
||||
#define IPG_BCSTOCTETRCVOK 0xB0
|
||||
#define IPG_FRAMESRCVDOK 0xB4
|
||||
#define IPG_MCSTFRAMESRCVDOK 0xB8
|
||||
#define IPG_BCSTFRAMESRCVDOK 0xBE
|
||||
#define IPG_MACCONTROLFRAMESRCVD 0xC6
|
||||
#define IPG_FRAMETOOLONGERRORS 0xC8
|
||||
#define IPG_INRANGELENGTHERRORS 0xCA
|
||||
#define IPG_FRAMECHECKSEQERRORS 0xCC
|
||||
#define IPG_FRAMESLOSTRXERRORS 0xCE
|
||||
#define IPG_OCTETXMTOK 0xD0
|
||||
#define IPG_MCSTOCTETXMTOK 0xD4
|
||||
#define IPG_BCSTOCTETXMTOK 0xD8
|
||||
#define IPG_FRAMESXMTDOK 0xDC
|
||||
#define IPG_MCSTFRAMESXMTDOK 0xE0
|
||||
#define IPG_FRAMESWDEFERREDXMT 0xE4
|
||||
#define IPG_LATECOLLISIONS 0xE8
|
||||
#define IPG_MULTICOLFRAMES 0xEC
|
||||
#define IPG_SINGLECOLFRAMES 0xF0
|
||||
#define IPG_BCSTFRAMESXMTDOK 0xF6
|
||||
#define IPG_CARRIERSENSEERRORS 0xF8
|
||||
#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
|
||||
#define IPG_FRAMESABORTXSCOLLS 0xFC
|
||||
#define IPG_FRAMESWEXDEFERRAL 0xFE
|
||||
|
||||
/* RMON statistic register offsets. */
|
||||
#define IPG_ETHERSTATSCOLLISIONS 0x100
|
||||
#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
|
||||
#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
|
||||
#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
|
||||
#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
|
||||
#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
|
||||
#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
|
||||
#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
|
||||
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
|
||||
#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
|
||||
#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
|
||||
#define IPG_ETHERSTATSFRAGMENTS 0x12C
|
||||
#define IPG_ETHERSTATSJABBERS 0x130
|
||||
#define IPG_ETHERSTATSOCTETS 0x134
|
||||
#define IPG_ETHERSTATSPKTS 0x138
|
||||
#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
|
||||
#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
|
||||
#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
|
||||
#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
|
||||
#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
|
||||
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
|
||||
|
||||
/* RMON statistic register equivalents. */
|
||||
#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
|
||||
#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
|
||||
#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
|
||||
#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
|
||||
#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
|
||||
#define IPG_ETHERSTATSDROPEVENTS 0xCE
|
||||
|
||||
/* Serial EEPROM offsets */
|
||||
#define IPG_EEPROM_CONFIGPARAM 0x00
|
||||
#define IPG_EEPROM_ASICCTRL 0x01
|
||||
#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
|
||||
#define IPG_EEPROM_SUBSYSTEMID 0x03
|
||||
#define IPG_EEPROM_STATIONADDRESS0 0x10
|
||||
#define IPG_EEPROM_STATIONADDRESS1 0x11
|
||||
#define IPG_EEPROM_STATIONADDRESS2 0x12
|
||||
|
||||
/* Register & data structure bit masks */
|
||||
|
||||
/* PCI register masks. */
|
||||
|
||||
/* IOBaseAddress */
|
||||
#define IPG_PIB_RSVD_MASK 0xFFFFFE01
|
||||
#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
|
||||
#define IPG_PIB_IOBASEADDRIND 0x00000001
|
||||
|
||||
/* MemBaseAddress */
|
||||
#define IPG_PMB_RSVD_MASK 0xFFFFFE07
|
||||
#define IPG_PMB_MEMBASEADDRIND 0x00000001
|
||||
#define IPG_PMB_MEMMAPTYPE 0x00000006
|
||||
#define IPG_PMB_MEMMAPTYPE0 0x00000002
|
||||
#define IPG_PMB_MEMMAPTYPE1 0x00000004
|
||||
#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
|
||||
|
||||
/* ConfigStatus */
|
||||
#define IPG_CS_RSVD_MASK 0xFFB0
|
||||
#define IPG_CS_CAPABILITIES 0x0010
|
||||
#define IPG_CS_66MHZCAPABLE 0x0020
|
||||
#define IPG_CS_FASTBACK2BACK 0x0080
|
||||
#define IPG_CS_DATAPARITYREPORTED 0x0100
|
||||
#define IPG_CS_DEVSELTIMING 0x0600
|
||||
#define IPG_CS_SIGNALEDTARGETABORT 0x0800
|
||||
#define IPG_CS_RECEIVEDTARGETABORT 0x1000
|
||||
#define IPG_CS_RECEIVEDMASTERABORT 0x2000
|
||||
#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
|
||||
#define IPG_CS_DETECTEDPARITYERROR 0x8000
|
||||
|
||||
/* TFD data structure masks. */
|
||||
|
||||
/* TFDList, TFC */
|
||||
#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
|
||||
#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
|
||||
#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
|
||||
#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
|
||||
#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
|
||||
#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
|
||||
#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
|
||||
#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
|
||||
#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
|
||||
#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
|
||||
#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
|
||||
#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
|
||||
#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
|
||||
#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
|
||||
#define IPG_TFC_TFDDONE 0x0000000080000000ULL
|
||||
#define IPG_TFC_VID 0x00000FFF00000000ULL
|
||||
#define IPG_TFC_CFI 0x0000100000000000ULL
|
||||
#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
|
||||
|
||||
/* TFDList, FragInfo */
|
||||
#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
|
||||
#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
|
||||
#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
|
||||
|
||||
/* RFD data structure masks. */
|
||||
|
||||
/* RFDList, RFS */
|
||||
#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
|
||||
#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
|
||||
#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
|
||||
#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
|
||||
#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
|
||||
#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
|
||||
#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
|
||||
#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
|
||||
#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
|
||||
#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
|
||||
#define IPG_RFS_TCPERROR 0x0000000001000000ULL
|
||||
#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
|
||||
#define IPG_RFS_UDPERROR 0x0000000004000000ULL
|
||||
#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
|
||||
#define IPG_RFS_IPERROR 0x0000000010000000ULL
|
||||
#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
|
||||
#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
|
||||
#define IPG_RFS_RFDDONE 0x0000000080000000ULL
|
||||
#define IPG_RFS_TCI 0x0000FFFF00000000ULL
|
||||
|
||||
/* RFDList, FragInfo */
|
||||
#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
|
||||
#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
|
||||
#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
|
||||
|
||||
/* I/O Register masks. */
|
||||
|
||||
/* RMON Statistics Mask */
|
||||
#define IPG_RZ_ALL 0x0FFFFFFF
|
||||
|
||||
/* Statistics Mask */
|
||||
#define IPG_SM_ALL 0x0FFFFFFF
|
||||
#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
|
||||
#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
|
||||
#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
|
||||
#define IPG_SM_RXJUMBOFRAMES 0x00000008
|
||||
#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
|
||||
#define IPG_SM_IPCHECKSUMERRORS 0x00000020
|
||||
#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
|
||||
#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
|
||||
#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
|
||||
#define IPG_SM_INRANGELENGTHERRORS 0x00000200
|
||||
#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
|
||||
#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
|
||||
#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
|
||||
#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
|
||||
#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
|
||||
#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
|
||||
#define IPG_SM_LATECOLLISIONS 0x00010000
|
||||
#define IPG_SM_MULTICOLFRAMES 0x00020000
|
||||
#define IPG_SM_SINGLECOLFRAMES 0x00040000
|
||||
#define IPG_SM_TXJUMBOFRAMES 0x00080000
|
||||
#define IPG_SM_CARRIERSENSEERRORS 0x00100000
|
||||
#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
|
||||
#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
|
||||
#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
|
||||
|
||||
/* Countdown */
|
||||
#define IPG_CD_RSVD_MASK 0x0700FFFF
|
||||
#define IPG_CD_COUNT 0x0000FFFF
|
||||
#define IPG_CD_COUNTDOWNSPEED 0x01000000
|
||||
#define IPG_CD_COUNTDOWNMODE 0x02000000
|
||||
#define IPG_CD_COUNTINTENABLED 0x04000000
|
||||
|
||||
/* TxDMABurstThresh */
|
||||
#define IPG_TB_RSVD_MASK 0xFF
|
||||
|
||||
/* TxDMAUrgentThresh */
|
||||
#define IPG_TU_RSVD_MASK 0xFF
|
||||
|
||||
/* TxDMAPollPeriod */
|
||||
#define IPG_TP_RSVD_MASK 0xFF
|
||||
|
||||
/* RxDMAUrgentThresh */
|
||||
#define IPG_RU_RSVD_MASK 0xFF
|
||||
|
||||
/* RxDMAPollPeriod */
|
||||
#define IPG_RP_RSVD_MASK 0xFF
|
||||
|
||||
/* ReceiveMode */
|
||||
#define IPG_RM_RSVD_MASK 0x3F
|
||||
#define IPG_RM_RECEIVEUNICAST 0x01
|
||||
#define IPG_RM_RECEIVEMULTICAST 0x02
|
||||
#define IPG_RM_RECEIVEBROADCAST 0x04
|
||||
#define IPG_RM_RECEIVEALLFRAMES 0x08
|
||||
#define IPG_RM_RECEIVEMULTICASTHASH 0x10
|
||||
#define IPG_RM_RECEIVEIPMULTICAST 0x20
|
||||
|
||||
/* PhySet */
|
||||
#define IPG_PS_MEM_LENB9B 0x01
|
||||
#define IPG_PS_MEM_LEN9 0x02
|
||||
#define IPG_PS_NON_COMPDET 0x04
|
||||
|
||||
/* PhyCtrl */
|
||||
#define IPG_PC_RSVD_MASK 0xFF
|
||||
#define IPG_PC_MGMTCLK_LO 0x00
|
||||
#define IPG_PC_MGMTCLK_HI 0x01
|
||||
#define IPG_PC_MGMTCLK 0x01
|
||||
#define IPG_PC_MGMTDATA 0x02
|
||||
#define IPG_PC_MGMTDIR 0x04
|
||||
#define IPG_PC_DUPLEX_POLARITY 0x08
|
||||
#define IPG_PC_DUPLEX_STATUS 0x10
|
||||
#define IPG_PC_LINK_POLARITY 0x20
|
||||
#define IPG_PC_LINK_SPEED 0xC0
|
||||
#define IPG_PC_LINK_SPEED_10MBPS 0x40
|
||||
#define IPG_PC_LINK_SPEED_100MBPS 0x80
|
||||
#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
|
||||
|
||||
/* DMACtrl */
|
||||
#define IPG_DC_RSVD_MASK 0xC07D9818
|
||||
#define IPG_DC_RX_DMA_COMPLETE 0x00000008
|
||||
#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
|
||||
#define IPG_DC_TX_DMA_COMPLETE 0x00000800
|
||||
#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
|
||||
#define IPG_DC_TX_DMA_IN_PROG 0x00008000
|
||||
#define IPG_DC_RX_EARLY_DISABLE 0x00010000
|
||||
#define IPG_DC_MWI_DISABLE 0x00040000
|
||||
#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
|
||||
#define IPG_DC_TX_BURST_LIMIT 0x00700000
|
||||
#define IPG_DC_TARGET_ABORT 0x40000000
|
||||
#define IPG_DC_MASTER_ABORT 0x80000000
|
||||
|
||||
/* ASICCtrl */
|
||||
#define IPG_AC_RSVD_MASK 0x07FFEFF2
|
||||
#define IPG_AC_EXP_ROM_SIZE 0x00000002
|
||||
#define IPG_AC_PHY_SPEED10 0x00000010
|
||||
#define IPG_AC_PHY_SPEED100 0x00000020
|
||||
#define IPG_AC_PHY_SPEED1000 0x00000040
|
||||
#define IPG_AC_PHY_MEDIA 0x00000080
|
||||
#define IPG_AC_FORCED_CFG 0x00000700
|
||||
#define IPG_AC_D3RESETDISABLE 0x00000800
|
||||
#define IPG_AC_SPEED_UP_MODE 0x00002000
|
||||
#define IPG_AC_LED_MODE 0x00004000
|
||||
#define IPG_AC_RST_OUT_POLARITY 0x00008000
|
||||
#define IPG_AC_GLOBAL_RESET 0x00010000
|
||||
#define IPG_AC_RX_RESET 0x00020000
|
||||
#define IPG_AC_TX_RESET 0x00040000
|
||||
#define IPG_AC_DMA 0x00080000
|
||||
#define IPG_AC_FIFO 0x00100000
|
||||
#define IPG_AC_NETWORK 0x00200000
|
||||
#define IPG_AC_HOST 0x00400000
|
||||
#define IPG_AC_AUTO_INIT 0x00800000
|
||||
#define IPG_AC_RST_OUT 0x01000000
|
||||
#define IPG_AC_INT_REQUEST 0x02000000
|
||||
#define IPG_AC_RESET_BUSY 0x04000000
|
||||
#define IPG_AC_LED_SPEED 0x08000000
|
||||
#define IPG_AC_LED_MODE_BIT_1 0x20000000
|
||||
|
||||
/* EepromCtrl */
|
||||
#define IPG_EC_RSVD_MASK 0x83FF
|
||||
#define IPG_EC_EEPROM_ADDR 0x00FF
|
||||
#define IPG_EC_EEPROM_OPCODE 0x0300
|
||||
#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
|
||||
#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
|
||||
#define IPG_EC_EEPROM_READOPCODE 0x0200
|
||||
#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
|
||||
#define IPG_EC_EEPROM_BUSY 0x8000
|
||||
|
||||
/* FIFOCtrl */
|
||||
#define IPG_FC_RSVD_MASK 0xC001
|
||||
#define IPG_FC_RAM_TEST_MODE 0x0001
|
||||
#define IPG_FC_TRANSMITTING 0x4000
|
||||
#define IPG_FC_RECEIVING 0x8000
|
||||
|
||||
/* TxStatus */
|
||||
#define IPG_TS_RSVD_MASK 0xFFFF00DD
|
||||
#define IPG_TS_TX_ERROR 0x00000001
|
||||
#define IPG_TS_LATE_COLLISION 0x00000004
|
||||
#define IPG_TS_TX_MAX_COLL 0x00000008
|
||||
#define IPG_TS_TX_UNDERRUN 0x00000010
|
||||
#define IPG_TS_TX_IND_REQD 0x00000040
|
||||
#define IPG_TS_TX_COMPLETE 0x00000080
|
||||
#define IPG_TS_TX_FRAMEID 0xFFFF0000
|
||||
|
||||
/* WakeEvent */
|
||||
#define IPG_WE_WAKE_PKT_ENABLE 0x01
|
||||
#define IPG_WE_MAGIC_PKT_ENABLE 0x02
|
||||
#define IPG_WE_LINK_EVT_ENABLE 0x04
|
||||
#define IPG_WE_WAKE_POLARITY 0x08
|
||||
#define IPG_WE_WAKE_PKT_EVT 0x10
|
||||
#define IPG_WE_MAGIC_PKT_EVT 0x20
|
||||
#define IPG_WE_LINK_EVT 0x40
|
||||
#define IPG_WE_WOL_ENABLE 0x80
|
||||
|
||||
/* IntEnable */
|
||||
#define IPG_IE_RSVD_MASK 0x1FFE
|
||||
#define IPG_IE_HOST_ERROR 0x0002
|
||||
#define IPG_IE_TX_COMPLETE 0x0004
|
||||
#define IPG_IE_MAC_CTRL_FRAME 0x0008
|
||||
#define IPG_IE_RX_COMPLETE 0x0010
|
||||
#define IPG_IE_RX_EARLY 0x0020
|
||||
#define IPG_IE_INT_REQUESTED 0x0040
|
||||
#define IPG_IE_UPDATE_STATS 0x0080
|
||||
#define IPG_IE_LINK_EVENT 0x0100
|
||||
#define IPG_IE_TX_DMA_COMPLETE 0x0200
|
||||
#define IPG_IE_RX_DMA_COMPLETE 0x0400
|
||||
#define IPG_IE_RFD_LIST_END 0x0800
|
||||
#define IPG_IE_RX_DMA_PRIORITY 0x1000
|
||||
|
||||
/* IntStatus */
|
||||
#define IPG_IS_RSVD_MASK 0x1FFF
|
||||
#define IPG_IS_INTERRUPT_STATUS 0x0001
|
||||
#define IPG_IS_HOST_ERROR 0x0002
|
||||
#define IPG_IS_TX_COMPLETE 0x0004
|
||||
#define IPG_IS_MAC_CTRL_FRAME 0x0008
|
||||
#define IPG_IS_RX_COMPLETE 0x0010
|
||||
#define IPG_IS_RX_EARLY 0x0020
|
||||
#define IPG_IS_INT_REQUESTED 0x0040
|
||||
#define IPG_IS_UPDATE_STATS 0x0080
|
||||
#define IPG_IS_LINK_EVENT 0x0100
|
||||
#define IPG_IS_TX_DMA_COMPLETE 0x0200
|
||||
#define IPG_IS_RX_DMA_COMPLETE 0x0400
|
||||
#define IPG_IS_RFD_LIST_END 0x0800
|
||||
#define IPG_IS_RX_DMA_PRIORITY 0x1000
|
||||
|
||||
/* MACCtrl */
|
||||
#define IPG_MC_RSVD_MASK 0x7FE33FA3
|
||||
#define IPG_MC_IFS_SELECT 0x00000003
|
||||
#define IPG_MC_IFS_4352BIT 0x00000003
|
||||
#define IPG_MC_IFS_1792BIT 0x00000002
|
||||
#define IPG_MC_IFS_1024BIT 0x00000001
|
||||
#define IPG_MC_IFS_96BIT 0x00000000
|
||||
#define IPG_MC_DUPLEX_SELECT 0x00000020
|
||||
#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
|
||||
#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
|
||||
#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
|
||||
#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
|
||||
#define IPG_MC_RCV_FCS 0x00000200
|
||||
#define IPG_MC_FIFO_LOOPBACK 0x00000400
|
||||
#define IPG_MC_MAC_LOOPBACK 0x00000800
|
||||
#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
|
||||
#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
|
||||
#define IPG_MC_COLLISION_DETECT 0x00010000
|
||||
#define IPG_MC_CARRIER_SENSE 0x00020000
|
||||
#define IPG_MC_STATISTICS_ENABLE 0x00200000
|
||||
#define IPG_MC_STATISTICS_DISABLE 0x00400000
|
||||
#define IPG_MC_STATISTICS_ENABLED 0x00800000
|
||||
#define IPG_MC_TX_ENABLE 0x01000000
|
||||
#define IPG_MC_TX_DISABLE 0x02000000
|
||||
#define IPG_MC_TX_ENABLED 0x04000000
|
||||
#define IPG_MC_RX_ENABLE 0x08000000
|
||||
#define IPG_MC_RX_DISABLE 0x10000000
|
||||
#define IPG_MC_RX_ENABLED 0x20000000
|
||||
#define IPG_MC_PAUSED 0x40000000
|
||||
|
||||
/*
|
||||
* Tune
|
||||
*/
|
||||
|
||||
/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
|
||||
#define IPG_APPEND_FCS_ON_TX 1
|
||||
|
||||
/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */
|
||||
#define IPG_STRIP_FCS_ON_RX 1
|
||||
|
||||
/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
|
||||
* Ethernet errors.
|
||||
*/
|
||||
#define IPG_DROP_ON_RX_ETH_ERRORS 1
|
||||
|
||||
/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
|
||||
* (via TFC).
|
||||
*/
|
||||
#define IPG_INSERT_MANUAL_VLAN_TAG 0
|
||||
|
||||
/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
|
||||
#define IPG_ADD_IPCHECKSUM_ON_TX 0
|
||||
|
||||
/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
|
||||
* DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
|
||||
*/
|
||||
#define IPG_ADD_TCPCHECKSUM_ON_TX 0
|
||||
|
||||
/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
|
||||
* DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
|
||||
*/
|
||||
#define IPG_ADD_UDPCHECKSUM_ON_TX 0
|
||||
|
||||
/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
|
||||
* constants as desired.
|
||||
*/
|
||||
#define IPG_MANUAL_VLAN_VID 0xABC
|
||||
#define IPG_MANUAL_VLAN_CFI 0x1
|
||||
#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
|
||||
|
||||
#define IPG_IO_REG_RANGE 0xFF
|
||||
#define IPG_MEM_REG_RANGE 0x154
|
||||
#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
|
||||
#define IPG_NIC_PHY_ADDRESS 0x01
|
||||
#define IPG_DMALIST_ALIGN_PAD 0x07
|
||||
#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
|
||||
|
||||
/* Number of milliseconds to wait after issuing a software reset.
|
||||
* 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
|
||||
*/
|
||||
#define IPG_AC_RESETWAIT 0x05
|
||||
|
||||
/* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */
|
||||
#define IPG_AC_RESET_TIMEOUT 0x0A
|
||||
|
||||
/* Minimum number of nanoseconds used to toggle MDC clock during
|
||||
* MII/GMII register access.
|
||||
*/
|
||||
#define IPG_PC_PHYCTRLWAIT_NS 200
|
||||
|
||||
#define IPG_TFDLIST_LENGTH 0x100
|
||||
|
||||
/* Number of frames between TxDMAComplete interrupt.
|
||||
* 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
|
||||
*/
|
||||
#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
|
||||
|
||||
#define IPG_RFDLIST_LENGTH 0x100
|
||||
|
||||
/* Maximum number of RFDs to process per interrupt.
|
||||
* 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
|
||||
*/
|
||||
#define IPG_MAXRFDPROCESS_COUNT 0x80
|
||||
|
||||
/* Minimum margin between last freed RFD, and current RFD.
|
||||
* 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
|
||||
*/
|
||||
#define IPG_MINUSEDRFDSTOFREE 0x80
|
||||
|
||||
/* specify the jumbo frame maximum size
|
||||
* per unit is 0x600 (the rx_buffer size that one RFD can carry)
|
||||
*/
|
||||
#define MAX_JUMBOSIZE 0x8 /* max is 12K */
|
||||
|
||||
/* Key register values loaded at driver start up. */
|
||||
|
||||
/* TXDMAPollPeriod is specified in 320ns increments.
|
||||
*
|
||||
* Value Time
|
||||
* ---------------------
|
||||
* 0x00-0x01 320ns
|
||||
* 0x03 ~1us
|
||||
* 0x1F ~10us
|
||||
* 0xFF ~82us
|
||||
*/
|
||||
#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
|
||||
|
||||
/* TxDMAUrgentThresh specifies the minimum amount of
|
||||
* data in the transmit FIFO before asserting an
|
||||
* urgent transmit DMA request.
|
||||
*
|
||||
* Value Min TxFIFO occupied space before urgent TX request
|
||||
* ---------------------------------------------------------------
|
||||
* 0x00-0x04 128 bytes (1024 bits)
|
||||
* 0x27 1248 bytes (~10000 bits)
|
||||
* 0x30 1536 bytes (12288 bits)
|
||||
* 0xFF 8192 bytes (65535 bits)
|
||||
*/
|
||||
#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
|
||||
|
||||
/* TxDMABurstThresh specifies the minimum amount of
|
||||
* free space in the transmit FIFO before asserting an
|
||||
* transmit DMA request.
|
||||
*
|
||||
* Value Min TxFIFO free space before TX request
|
||||
* ----------------------------------------------------
|
||||
* 0x00-0x08 256 bytes
|
||||
* 0x30 1536 bytes
|
||||
* 0xFF 8192 bytes
|
||||
*/
|
||||
#define IPG_TXDMABURSTTHRESH_VALUE 0x30

/* RXDMAPollPeriod is specified in 320ns increments.
*
* Value Time
* ---------------------
* 0x00-0x01 320ns
* 0x03 ~1us
* 0x1F ~10us
* 0xFF ~82us
*/
#define IPG_RXDMAPOLLPERIOD_VALUE 0x01

/* RxDMAUrgentThresh specifies the minimum amount of
* free space within the receive FIFO before asserting
* a urgent receive DMA request.
*
* Value Min RxFIFO free space before urgent RX request
* ---------------------------------------------------------------
* 0x00-0x04 128 bytes (1024 bits)
* 0x27 1248 bytes (~10000 bits)
* 0x30 1536 bytes (12288 bits)
* 0xFF 8192 bytes (65535 bits)
*/
#define IPG_RXDMAURGENTTHRESH_VALUE 0x30

/* RxDMABurstThresh specifies the minimum amount of
* occupied space within the receive FIFO before asserting
* a receive DMA request.
*
* Value Min TxFIFO free space before TX request
* ----------------------------------------------------
* 0x00-0x08 256 bytes
* 0x30 1536 bytes
* 0xFF 8192 bytes
*/
#define IPG_RXDMABURSTTHRESH_VALUE 0x30

/* FlowOnThresh specifies the maximum amount of occupied
* space in the receive FIFO before a PAUSE frame with
* maximum pause time transmitted.
*
* Value Max RxFIFO occupied space before PAUSE
* ---------------------------------------------------
* 0x0000 0 bytes
* 0x0740 29,696 bytes
* 0x07FF 32,752 bytes
*/
#define IPG_FLOWONTHRESH_VALUE 0x0740

/* FlowOffThresh specifies the minimum amount of occupied
* space in the receive FIFO before a PAUSE frame with
* zero pause time is transmitted.
*
* Value Max RxFIFO occupied space before PAUSE
* ---------------------------------------------------
* 0x0000 0 bytes
* 0x00BF 3056 bytes
* 0x07FF 32,752 bytes
*/
#define IPG_FLOWOFFTHRESH_VALUE 0x00BF

/*
* Miscellaneous macros.
*/

/* Macros for printing debug statements. */
#ifdef IPG_DEBUG
# define IPG_DEBUG_MSG(fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG "IPG: " fmt, ##args); \
} while (0)
# define IPG_DDEBUG_MSG(fmt, args...) \
printk(KERN_DEBUG "IPG: " fmt, ##args)
# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
#else
# define IPG_DEBUG_MSG(fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG "IPG: " fmt, ##args); \
} while (0)
# define IPG_DDEBUG_MSG(fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG "IPG: " fmt, ##args); \
} while (0)
# define IPG_DUMPRFDLIST(args)
# define IPG_DUMPTFDLIST(args)
#endif

/*
* End miscellaneous macros.
*/

/* Transmit Frame Descriptor. The IPG supports 15 fragments,
* however Linux requires only a single fragment. Note, each
* TFD field is 64 bits wide.
*/
struct ipg_tx {
__le64 next_desc;
__le64 tfc;
__le64 frag_info;
};

/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
*/
struct ipg_rx {
__le64 next_desc;
__le64 rfs;
__le64 frag_info;
};

struct ipg_jumbo {
int found_start;
int current_size;
struct sk_buff *skb;
};

/* Structure of IPG NIC specific data. */
struct ipg_nic_private {
void __iomem *ioaddr;
struct ipg_tx *txd;
struct ipg_rx *rxd;
dma_addr_t txd_map;
dma_addr_t rxd_map;
struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
unsigned int tx_current;
unsigned int tx_dirty;
unsigned int rx_current;
unsigned int rx_dirty;
bool is_jumbo;
struct ipg_jumbo jumbo;
unsigned long rxfrag_size;
unsigned long rxsupport_size;
unsigned long max_rxframe_size;
unsigned int rx_buf_sz;
struct pci_dev *pdev;
struct net_device *dev;
struct net_device_stats stats;
spinlock_t lock;
int tenmbpsmode;

u16 led_mode;
u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */

struct mutex mii_mutex;
struct mii_if_info mii_if;
int reset_current_tfd;
#ifdef IPG_DEBUG
int RFDlistendCount;
int RFDListCheckedCount;
int EmptyRFDListCount;
#endif
struct delayed_work task;
};

#endif /* __LINUX_IPG_H */
@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
dev->caps.phys_port_id[i] = func_cap.phys_port_id;
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]);
if (err)
goto err_mem;
}
@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long)
pci_resource_len(dev->persist->pdev, 2));
err = -ENOMEM;
goto err_mem;
}
|
||||
|
||||
|
@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
|
||||
struct res_counter *counter;
|
||||
struct res_counter *tmp;
|
||||
int err;
|
||||
int index;
|
||||
int *counters_arr = NULL;
|
||||
int i, j;
|
||||
|
||||
err = move_all_busy(dev, slave, RES_COUNTER);
|
||||
if (err)
|
||||
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
|
||||
slave);
|
||||
|
||||
spin_lock_irq(mlx4_tlock(dev));
|
||||
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
|
||||
if (counter->com.owner == slave) {
|
||||
index = counter->com.res_id;
|
||||
rb_erase(&counter->com.node,
|
||||
&tracker->res_tree[RES_COUNTER]);
|
||||
list_del(&counter->com.list);
|
||||
kfree(counter);
|
||||
__mlx4_counter_free(dev, index);
|
||||
counters_arr = kmalloc_array(dev->caps.max_counters,
|
||||
sizeof(*counters_arr), GFP_KERNEL);
|
||||
if (!counters_arr)
|
||||
return;
|
||||
|
||||
do {
|
||||
i = 0;
|
||||
j = 0;
|
||||
spin_lock_irq(mlx4_tlock(dev));
|
||||
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
|
||||
if (counter->com.owner == slave) {
|
||||
counters_arr[i++] = counter->com.res_id;
|
||||
rb_erase(&counter->com.node,
|
||||
&tracker->res_tree[RES_COUNTER]);
|
||||
list_del(&counter->com.list);
|
||||
kfree(counter);
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(mlx4_tlock(dev));
|
||||
|
||||
while (j < i) {
|
||||
__mlx4_counter_free(dev, counters_arr[j++]);
|
||||
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(mlx4_tlock(dev));
|
||||
} while (i);
|
||||
|
||||
kfree(counters_arr);
|
||||
}
|
||||
|
||||
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
|
||||
|
@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
|
||||
|
||||
#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
|
||||
|
||||
enum mlx5e_dma_map_type {
|
||||
MLX5E_DMA_MAP_SINGLE,
|
||||
MLX5E_DMA_MAP_PAGE
|
||||
};
|
||||
|
||||
struct mlx5e_sq_dma {
|
||||
dma_addr_t addr;
|
||||
u32 size;
|
||||
dma_addr_t addr;
|
||||
u32 size;
|
||||
enum mlx5e_dma_map_type type;
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
|
||||
u32 tirn)
|
||||
{
|
||||
void *in;
|
||||
int inlen;
|
||||
int err;
|
||||
|
||||
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
|
||||
in = mlx5_vzalloc(inlen);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
|
||||
|
||||
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
|
||||
|
||||
kvfree(in);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MLX5E_NUM_TT; i++) {
|
||||
err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
|
||||
priv->tirn[i]);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
|
||||
goto err_clear_state_opened_flag;
|
||||
}
|
||||
|
||||
err = mlx5e_refresh_tirs_self_loopback_enable(priv);
|
||||
if (err) {
|
||||
netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
|
||||
__func__, err);
|
||||
goto err_close_channels;
|
||||
}
|
||||
|
||||
mlx5e_update_carrier(priv);
|
||||
mlx5e_redirect_rqts(priv);
|
||||
|
||||
@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
|
||||
|
||||
return 0;
|
||||
|
||||
err_close_channels:
|
||||
mlx5e_close_channels(priv);
|
||||
err_clear_state_opened_flag:
|
||||
clear_bit(MLX5E_STATE_OPENED, &priv->state);
|
||||
return err;
|
||||
@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
|
||||
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
|
||||
|
||||
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
|
||||
|
||||
if (new_mtu > max_mtu) {
|
||||
netdev_err(netdev,
|
||||
"%s: Bad MTU (%d) > (%d) Max\n",
|
||||
@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
|
||||
"Not creating net device, some required device capabilities are missing\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
|
||||
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -61,41 +61,49 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
|
||||
u32 *size)
|
||||
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
|
||||
struct mlx5e_sq_dma *dma)
|
||||
{
|
||||
sq->dma_fifo_pc--;
|
||||
*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
|
||||
*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
|
||||
switch (dma->type) {
|
||||
case MLX5E_DMA_MAP_SINGLE:
|
||||
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
|
||||
break;
|
||||
case MLX5E_DMA_MAP_PAGE:
|
||||
dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
|
||||
break;
|
||||
default:
|
||||
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
|
||||
dma_addr_t addr,
|
||||
u32 size,
|
||||
enum mlx5e_dma_map_type map_type)
|
||||
{
|
||||
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
|
||||
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
|
||||
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
|
||||
sq->dma_fifo_pc++;
|
||||
}
|
||||
|
||||
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
|
||||
{
|
||||
return &sq->dma_fifo[i & sq->dma_fifo_mask];
|
||||
}
|
||||
|
||||
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
|
||||
{
|
||||
dma_addr_t addr;
|
||||
u32 size;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
|
||||
mlx5e_dma_pop_last_pushed(sq, &addr, &size);
|
||||
dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
|
||||
struct mlx5e_sq_dma *last_pushed_dma =
|
||||
mlx5e_dma_get(sq, --sq->dma_fifo_pc);
|
||||
|
||||
mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
|
||||
u32 size)
|
||||
{
|
||||
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
|
||||
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
|
||||
sq->dma_fifo_pc++;
|
||||
}
|
||||
|
||||
static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
|
||||
u32 *size)
|
||||
{
|
||||
*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
|
||||
*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
|
||||
}
|
||||
|
||||
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
|
||||
void *accel_priv, select_queue_fallback_t fallback)
|
||||
{
|
||||
@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
|
||||
*/
|
||||
#define MLX5E_MIN_INLINE ETH_HLEN
|
||||
|
||||
if (bf && (skb_headlen(skb) <= sq->max_inline))
|
||||
return skb_headlen(skb);
|
||||
if (bf) {
|
||||
u16 ihs = skb_headlen(skb);
|
||||
|
||||
if (skb_vlan_tag_present(skb))
|
||||
ihs += VLAN_HLEN;
|
||||
|
||||
if (ihs <= sq->max_inline)
|
||||
return skb_headlen(skb);
|
||||
}
|
||||
|
||||
return MLX5E_MIN_INLINE;
|
||||
}
|
||||
@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
|
||||
dseg->lkey = sq->mkey_be;
|
||||
dseg->byte_count = cpu_to_be32(headlen);
|
||||
|
||||
mlx5e_dma_push(sq, dma_addr, headlen);
|
||||
mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
|
||||
MLX5E_TX_SKB_CB(skb)->num_dma++;
|
||||
|
||||
dseg++;
|
||||
@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
|
||||
dseg->lkey = sq->mkey_be;
|
||||
dseg->byte_count = cpu_to_be32(fsz);
|
||||
|
||||
mlx5e_dma_push(sq, dma_addr, fsz);
|
||||
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
|
||||
MLX5E_TX_SKB_CB(skb)->num_dma++;
|
||||
|
||||
dseg++;
|
||||
@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
|
||||
}
|
||||
|
||||
for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
|
||||
dma_addr_t addr;
|
||||
u32 size;
|
||||
struct mlx5e_sq_dma *dma =
|
||||
mlx5e_dma_get(sq, dma_fifo_cc++);
|
||||
|
||||
mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
|
||||
dma_fifo_cc++;
|
||||
dma_unmap_single(sq->pdev, addr, size,
|
||||
DMA_TO_DEVICE);
|
||||
mlx5e_tx_dma_unmap(sq->pdev, dma);
|
||||
}
|
||||
|
||||
npkts++;
|
||||
|
@ -7429,15 +7429,15 @@ process_pkt:

rtl8169_rx_vlan_tag(desc, skb);

if (skb->pkt_type == PACKET_MULTICAST)
dev->stats.multicast++;

napi_gro_receive(&tp->napi, skb);

u64_stats_update_begin(&tp->rx_stats.syncp);
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);

if (skb->pkt_type == PACKET_MULTICAST)
dev->stats.multicast++;
}
release_descriptor:
desc->opts2 = 0;
@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
/* Interrupt enable: */
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
/* Receive FIFO full warning */
ravb_write(ndev, RIC1_RFWE, RIC1);
/* Receive FIFO full error, descriptor empty */
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
ravb_write(ndev, tic & ~BIT(q), TIC);
ric0 &= ~BIT(q);
tic &= ~BIT(q);
ravb_write(ndev, ric0, RIC0);
ravb_write(ndev, tic, TIC);
__napi_schedule(&priv->napi[q]);
} else {
netdev_warn(ndev,
@ -3422,7 +3422,7 @@ out:
* with our request for slot reset the mmio_enabled callback will never be
* called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static struct pci_error_handlers efx_err_handlers = {
static const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
.slot_reset = efx_io_slot_reset,
.resume = efx_io_resume,
@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)

static int smsc911x_phy_reset(struct smsc911x_data *pdata)
{
struct phy_device *phy_dev = pdata->phy_dev;
unsigned int temp;
unsigned int i = 100000;

BUG_ON(!phy_dev);
BUG_ON(!phy_dev->bus);

SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
temp = smsc911x_reg_read(pdata, PMT_CTRL);
smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
do {
msleep(1);
temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
MII_BMCR);
} while ((i--) && (temp & BMCR_RESET));
temp = smsc911x_reg_read(pdata, PMT_CTRL);
} while ((i--) && (temp & PMT_CTRL_PHY_RST_));

if (temp & BMCR_RESET) {
if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
SMSC_WARN(pdata, hw, "PHY reset failed to complete");
return -EIO;
}
@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
}

/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata))
if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
return -ENODEV;

dev->flags |= IFF_MULTICAST;
|
||||
|
@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
|
||||
QSGMII_PHY_RX_SIGNAL_DETECT_EN |
|
||||
QSGMII_PHY_TX_DRIVER_EN |
|
||||
QSGMII_PHY_QSGMII_EN |
|
||||
0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
|
||||
0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
|
||||
0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
|
||||
0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
|
||||
0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
|
||||
0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
|
||||
0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
|
||||
0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
|
||||
0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
|
||||
0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
|
||||
}
|
||||
|
||||
plat_dat->has_gmac = true;
|
||||
|
@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
|
||||
*/
|
||||
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
|
||||
|
||||
#define VAL_PKT_LEN_DEF 0
|
||||
/* ValPktLen[] is used for setting the checksum offload ability of NIC.
|
||||
0: Receive frame with invalid layer 2 length (Default)
|
||||
1: Drop frame with invalid layer 2 length
|
||||
*/
|
||||
VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
|
||||
|
||||
#define WOL_OPT_DEF 0
|
||||
#define WOL_OPT_MIN 0
|
||||
#define WOL_OPT_MAX 7
|
||||
@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
|
||||
|
||||
velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
|
||||
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
|
||||
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
|
||||
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
|
||||
velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
|
||||
opts->numrx = (opts->numrx & ~3);
|
||||
@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
|
||||
int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
|
||||
VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
|
||||
if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
|
||||
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
|
||||
VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
|
||||
stats->rx_length_errors++;
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
|
||||
dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
|
||||
vptr->rx.buf_sz, DMA_FROM_DEVICE);
|
||||
|
||||
/*
|
||||
* Drop frame not meeting IEEE 802.3
|
||||
*/
|
||||
|
||||
if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
|
||||
if (rd->rdesc0.RSR & RSR_RL) {
|
||||
stats->rx_length_errors++;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
velocity_rx_csum(rd, skb);
|
||||
|
||||
if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
|
||||
|
@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
FJES_CMD_REQ_RES_CODE_BUSY) &&
(timeout > 0)) {
msleep(200 + hw->my_epid * 20);
timeout -= (200 + hw->my_epid * 20);
timeout -= (200 + hw->my_epid * 20);

res_buf->unshare_buffer.length = 0;
res_buf->unshare_buffer.code = 0;
|
||||
|
@ -254,7 +254,7 @@ acct:
|
||||
}
|
||||
}
|
||||
|
||||
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
|
||||
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
|
||||
bool local)
|
||||
{
|
||||
struct ipvl_dev *ipvlan = addr->master;
|
||||
@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
|
||||
unsigned int len;
|
||||
rx_handler_result_t ret = RX_HANDLER_CONSUMED;
|
||||
bool success = false;
|
||||
struct sk_buff *skb = *pskb;
|
||||
|
||||
len = skb->len + ETH_HLEN;
|
||||
if (unlikely(!(dev->flags & IFF_UP))) {
|
||||
@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
|
||||
if (!skb)
|
||||
goto out;
|
||||
|
||||
*pskb = skb;
|
||||
skb->dev = dev;
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
|
||||
@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
|
||||
if (addr)
|
||||
return ipvlan_rcv_frame(addr, skb, true);
|
||||
return ipvlan_rcv_frame(addr, &skb, true);
|
||||
|
||||
out:
|
||||
skb->dev = ipvlan->phy_dev;
|
||||
@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
|
||||
if (lyr3h) {
|
||||
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
|
||||
if (addr)
|
||||
return ipvlan_rcv_frame(addr, skb, true);
|
||||
return ipvlan_rcv_frame(addr, &skb, true);
|
||||
}
|
||||
skb = skb_share_check(skb, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
|
||||
|
||||
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
|
||||
if (addr)
|
||||
ret = ipvlan_rcv_frame(addr, skb, false);
|
||||
ret = ipvlan_rcv_frame(addr, pskb, false);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
|
||||
|
||||
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
|
||||
if (addr)
|
||||
ret = ipvlan_rcv_frame(addr, skb, false);
|
||||
ret = ipvlan_rcv_frame(addr, pskb, false);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
|
||||
WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
|
||||
port->mode);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
return RX_HANDLER_CONSUMED;
|
||||
}
|
||||
|
@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
|
||||
skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
|
||||
if (!skb)
|
||||
return RX_HANDLER_CONSUMED;
|
||||
*pskb = skb;
|
||||
eth = eth_hdr(skb);
|
||||
macvlan_forward_source(skb, port, eth->h_source);
|
||||
src = macvlan_hash_lookup(port, eth->h_source);
|
||||
@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
*pskb = skb;
|
||||
skb->dev = dev;
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
|
||||
|
@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.ack_interrupt = at803x_ack_interrupt,
|
||||
.config_intr = at803x_config_intr,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.ack_interrupt = at803x_ack_interrupt,
|
||||
.config_intr = at803x_config_intr,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
|
@ -1153,6 +1153,21 @@ static struct phy_driver marvell_drivers[] = {
|
||||
.suspend = &genphy_suspend,
|
||||
.driver = { .owner = THIS_MODULE },
|
||||
},
|
||||
{
|
||||
.phy_id = MARVELL_PHY_ID_88E1540,
|
||||
.phy_id_mask = MARVELL_PHY_ID_MASK,
|
||||
.name = "Marvell 88E1540",
|
||||
.features = PHY_GBIT_FEATURES,
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.config_aneg = &m88e1510_config_aneg,
|
||||
.read_status = &marvell_read_status,
|
||||
.ack_interrupt = &marvell_ack_interrupt,
|
||||
.config_intr = &marvell_config_intr,
|
||||
.did_interrupt = &m88e1121_did_interrupt,
|
||||
.resume = &genphy_resume,
|
||||
.suspend = &genphy_suspend,
|
||||
.driver = { .owner = THIS_MODULE },
|
||||
},
|
||||
{
|
||||
.phy_id = MARVELL_PHY_ID_88E3016,
|
||||
.phy_id_mask = MARVELL_PHY_ID_MASK,
|
||||
@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
|
||||
{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
|
||||
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
|
||||
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
|
||||
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
|
||||
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
|
||||
{ }
|
||||
};
|
||||
|
@ -863,6 +863,9 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
break;
case PHY_NOLINK:
if (phy_interrupt_is_valid(phydev))
break;

err = phy_read_status(phydev);
if (err)
break;
|
||||
|
@ -66,6 +66,7 @@
|
||||
#define PHY_ID_VSC8244 0x000fc6c0
|
||||
#define PHY_ID_VSC8514 0x00070670
|
||||
#define PHY_ID_VSC8574 0x000704a0
|
||||
#define PHY_ID_VSC8601 0x00070420
|
||||
#define PHY_ID_VSC8662 0x00070660
|
||||
#define PHY_ID_VSC8221 0x000fc550
|
||||
#define PHY_ID_VSC8211 0x000fc4b0
|
||||
@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
|
||||
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
|
||||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
|
||||
phydev->drv->phy_id == PHY_ID_VSC8514 ||
|
||||
phydev->drv->phy_id == PHY_ID_VSC8574) ?
|
||||
phydev->drv->phy_id == PHY_ID_VSC8574 ||
|
||||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
|
||||
MII_VSC8244_IMASK_MASK :
|
||||
MII_VSC8221_IMASK_MASK);
|
||||
else {
|
||||
@ -271,6 +273,18 @@ static struct phy_driver vsc82xx_driver[] = {
|
||||
.ack_interrupt = &vsc824x_ack_interrupt,
|
||||
.config_intr = &vsc82xx_config_intr,
|
||||
.driver = { .owner = THIS_MODULE,},
|
||||
}, {
|
||||
.phy_id = PHY_ID_VSC8601,
|
||||
.name = "Vitesse VSC8601",
|
||||
.phy_id_mask = 0x000ffff0,
|
||||
.features = PHY_GBIT_FEATURES,
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.config_init = &genphy_config_init,
|
||||
.config_aneg = &genphy_config_aneg,
|
||||
.read_status = &genphy_read_status,
|
||||
.ack_interrupt = &vsc824x_ack_interrupt,
|
||||
.config_intr = &vsc82xx_config_intr,
|
||||
.driver = { .owner = THIS_MODULE,},
|
||||
}, {
|
||||
.phy_id = PHY_ID_VSC8662,
|
||||
.name = "Vitesse VSC8662",
|
||||
|
@ -695,6 +695,11 @@ static const struct usb_device_id products[] = {
|
||||
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
|
||||
.driver_info = (kernel_ulong_t) &wwan_info,
|
||||
}, {
|
||||
/* Dell DW5580 modules */
|
||||
USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
|
||||
.driver_info = (kernel_ulong_t)&wwan_info,
|
||||
}, {
|
||||
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
|
@ -2157,12 +2157,13 @@ vmxnet3_set_mc(struct net_device *netdev)
|
||||
if (!netdev_mc_empty(netdev)) {
|
||||
new_table = vmxnet3_copy_mc(netdev);
|
||||
if (new_table) {
|
||||
rxConf->mfTableLen = cpu_to_le16(
|
||||
netdev_mc_count(netdev) * ETH_ALEN);
|
||||
size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
|
||||
|
||||
rxConf->mfTableLen = cpu_to_le16(sz);
|
||||
new_table_pa = dma_map_single(
|
||||
&adapter->pdev->dev,
|
||||
new_table,
|
||||
rxConf->mfTableLen,
|
||||
sz,
|
||||
PCI_DMA_TODEVICE);
|
||||
}
|
||||
|
||||
|
@ -69,10 +69,10 @@
|
||||
/*
|
||||
* Version numbers
|
||||
*/
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k"
|
||||
#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
|
||||
|
||||
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01040300
|
||||
#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
|
||||
|
||||
#if defined(CONFIG_PCI_MSI)
|
||||
/* RSS only makes sense if MSI-X is supported. */
|
||||
|
@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E3016 0x01410e60

/* struct phy_device dev_flags definitions */
|
||||
|
@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
|
||||
u8 lro_cap[0x1];
|
||||
u8 lro_psh_flag[0x1];
|
||||
u8 lro_time_stamp[0x1];
|
||||
u8 reserved_0[0x6];
|
||||
u8 reserved_0[0x3];
|
||||
u8 self_lb_en_modifiable[0x1];
|
||||
u8 reserved_1[0x2];
|
||||
u8 max_lso_cap[0x5];
|
||||
u8 reserved_1[0x4];
|
||||
u8 reserved_2[0x4];
|
||||
u8 rss_ind_tbl_cap[0x4];
|
||||
u8 reserved_2[0x3];
|
||||
u8 reserved_3[0x3];
|
||||
u8 tunnel_lso_const_out_ip_id[0x1];
|
||||
u8 reserved_3[0x2];
|
||||
u8 reserved_4[0x2];
|
||||
u8 tunnel_statless_gre[0x1];
|
||||
u8 tunnel_stateless_vxlan[0x1];
|
||||
|
||||
u8 reserved_4[0x20];
|
||||
u8 reserved_5[0x20];
|
||||
|
||||
u8 reserved_5[0x10];
|
||||
u8 reserved_6[0x10];
|
||||
u8 lro_min_mss_size[0x10];
|
||||
|
||||
u8 reserved_6[0x120];
|
||||
u8 reserved_7[0x120];
|
||||
|
||||
u8 lro_timer_supported_periods[4][0x20];
|
||||
|
||||
u8 reserved_7[0x600];
|
||||
u8 reserved_8[0x600];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_roce_cap_bits {
|
||||
@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
|
||||
};
|
||||
|
||||
struct mlx5_ifc_modify_tir_bitmask_bits {
|
||||
u8 reserved[0x20];
|
||||
u8 reserved_0[0x20];
|
||||
|
||||
u8 reserved1[0x1f];
|
||||
u8 reserved_1[0x1b];
|
||||
u8 self_lb_en[0x1];
|
||||
u8 reserved_2[0x3];
|
||||
u8 lro[0x1];
|
||||
};
|
||||
|
||||
|
@ -2068,20 +2068,23 @@ struct pcpu_sw_netstats {
|
||||
struct u64_stats_sync syncp;
|
||||
};
|
||||
|
||||
#define netdev_alloc_pcpu_stats(type) \
|
||||
({ \
|
||||
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
|
||||
if (pcpu_stats) { \
|
||||
int __cpu; \
|
||||
for_each_possible_cpu(__cpu) { \
|
||||
typeof(type) *stat; \
|
||||
stat = per_cpu_ptr(pcpu_stats, __cpu); \
|
||||
u64_stats_init(&stat->syncp); \
|
||||
} \
|
||||
} \
|
||||
pcpu_stats; \
|
||||
#define __netdev_alloc_pcpu_stats(type, gfp) \
|
||||
({ \
|
||||
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
|
||||
if (pcpu_stats) { \
|
||||
int __cpu; \
|
||||
for_each_possible_cpu(__cpu) { \
|
||||
typeof(type) *stat; \
|
||||
stat = per_cpu_ptr(pcpu_stats, __cpu); \
|
||||
u64_stats_init(&stat->syncp); \
|
||||
} \
|
||||
} \
|
||||
pcpu_stats; \
|
||||
})
|
||||
|
||||
#define netdev_alloc_pcpu_stats(type) \
|
||||
__netdev_alloc_pcpu_stats(type, GFP_KERNEL);
|
||||
|
||||
#include <linux/notifier.h>
|
||||
|
||||
/* netdevice notifier chain. Please remember to update the rtnetlink
|
||||
@ -3854,6 +3857,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
|
||||
return dev->priv_flags & IFF_EBRIDGE;
|
||||
}
|
||||
|
||||
static inline bool netif_is_bridge_port(const struct net_device *dev)
|
||||
{
|
||||
return dev->priv_flags & IFF_BRIDGE_PORT;
|
||||
}
|
||||
|
||||
static inline bool netif_is_ovs_master(const struct net_device *dev)
|
||||
{
|
||||
return dev->priv_flags & IFF_OPENVSWITCH;
|
||||
|
@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
size_t len);
size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
|
||||
|
||||
|
@ -5,10 +5,13 @@
#include <linux/netdevice.h>

#ifdef CONFIG_NETFILTER_INGRESS
static inline int nf_hook_ingress_active(struct sk_buff *skb)
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
NFPROTO_NETDEV, NF_NETDEV_INGRESS);
#ifdef HAVE_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
return !list_empty(&skb->dev->nf_hooks_ingress);
}

static inline int nf_hook_ingress(struct sk_buff *skb)
@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
struct nf_hook_state state;

nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
skb->dev, NULL, dev_net(skb->dev), NULL);
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
|
||||
|
||||
|
@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)

static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);

return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);

if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
struct pcpu_sw_netstats __percpu *stats)
{
if (err > 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);

u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += err;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
} else if (err < 0) {
err_stats->tx_errors++;
err_stats->tx_aborted_errors++;
|
||||
|
@ -618,6 +618,8 @@ struct nft_expr_ops {
|
||||
void (*eval)(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt);
|
||||
int (*clone)(struct nft_expr *dst,
|
||||
const struct nft_expr *src);
|
||||
unsigned int size;
|
||||
|
||||
int (*init)(const struct nft_ctx *ctx,
|
||||
@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
|
||||
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
|
||||
const struct nft_expr *expr);
|
||||
|
||||
static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
|
||||
static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
|
||||
{
|
||||
int err;
|
||||
|
||||
__module_get(src->ops->type->owner);
|
||||
memcpy(dst, src, src->ops->size);
|
||||
if (src->ops->clone) {
|
||||
dst->ops = src->ops;
|
||||
err = src->ops->clone(dst, src);
|
||||
if (err < 0)
|
||||
return err;
|
||||
} else {
|
||||
memcpy(dst, src, src->ops->size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2226,6 +2226,31 @@ static inline bool sk_listener(const struct sock *sk)
|
||||
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
|
||||
}
|
||||
|
||||
/**
|
||||
* sk_state_load - read sk->sk_state for lockless contexts
|
||||
* @sk: socket pointer
|
||||
*
|
||||
* Paired with sk_state_store(). Used in places we do not hold socket lock :
|
||||
* tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
|
||||
*/
|
||||
static inline int sk_state_load(const struct sock *sk)
|
||||
{
|
||||
return smp_load_acquire(&sk->sk_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* sk_state_store - update sk->sk_state
|
||||
* @sk: socket pointer
|
||||
* @newstate: new state
|
||||
*
|
||||
* Paired with sk_state_load(). Should be used in contexts where
|
||||
* state change might impact lockless readers.
|
||||
*/
|
||||
static inline void sk_state_store(struct sock *sk, int newstate)
|
||||
{
|
||||
smp_store_release(&sk->sk_state, newstate);
|
||||
}
|
||||
|
||||
void sock_enable_timestamp(struct sock *sk, int flag);
|
||||
int sock_get_timestamp(struct sock *, struct timeval __user *);
|
||||
int sock_get_timestampns(struct sock *, struct timespec __user *);
|
||||
|
@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
struct net_device *filter_dev,
int idx)
{
return -EOPNOTSUPP;
return idx;
}

static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp)
skb->pkt_type = PACKET_HOST;
}

if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
!netif_is_macvlan_port(vlan_dev) &&
!netif_is_bridge_port(vlan_dev)) {
unsigned int offset = skb->data - skb_mac_header(skb);

/*
@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)

p->state = state;
err = switchdev_port_attr_set(p->dev, &attr);
if (err)
if (err && err != -EOPNOTSUPP)
br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
}
@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p)
p->config_pending = 0;

err = switchdev_port_attr_set(p->dev, &attr);
if (err)
if (err && err != -EOPNOTSUPP)
netdev_err(p->dev, "failed to set HW ageing time\n");
}
|
||||
|
||||
|
@ -2403,17 +2403,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
|
||||
{
|
||||
static const netdev_features_t null_features = 0;
|
||||
struct net_device *dev = skb->dev;
|
||||
const char *driver = "";
|
||||
const char *name = "";
|
||||
|
||||
if (!net_ratelimit())
|
||||
return;
|
||||
|
||||
if (dev && dev->dev.parent)
|
||||
driver = dev_driver_string(dev->dev.parent);
|
||||
|
||||
if (dev) {
|
||||
if (dev->dev.parent)
|
||||
name = dev_driver_string(dev->dev.parent);
|
||||
else
|
||||
name = netdev_name(dev);
|
||||
}
|
||||
WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
|
||||
"gso_type=%d ip_summed=%d\n",
|
||||
driver, dev ? &dev->features : &null_features,
|
||||
name, dev ? &dev->features : &null_features,
|
||||
skb->sk ? &skb->sk->sk_route_caps : &null_features,
|
||||
skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
|
||||
skb_shinfo(skb)->gso_type, skb->ip_summed);
|
||||
@ -6426,11 +6429,16 @@ int __netdev_update_features(struct net_device *dev)
|
||||
|
||||
if (dev->netdev_ops->ndo_set_features)
|
||||
err = dev->netdev_ops->ndo_set_features(dev, features);
|
||||
else
|
||||
err = 0;
|
||||
|
||||
if (unlikely(err < 0)) {
|
||||
netdev_err(dev,
|
||||
"set_features() failed (%d); wanted %pNF, left %pNF\n",
|
||||
err, &features, &dev->features);
|
||||
/* return non-0 since some features might have changed and
|
||||
* it's better to fire a spurious notification than miss it
|
||||
*/
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh)
struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
/* keep skb alive even if arp_queue overflows */
if (skb)
skb = skb_copy(skb, GFP_ATOMIC);
skb = skb_clone(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
|
||||
|
@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
{
|
||||
const struct rtnl_link_stats64 *stats;
|
||||
struct rtnl_link_stats64 temp;
|
||||
struct nlattr *attr;
|
||||
|
||||
stats = dev_get_stats(dev, &temp);
|
||||
|
||||
attr = nla_reserve(skb, IFLA_STATS,
|
||||
sizeof(struct rtnl_link_stats));
|
||||
if (!attr)
|
||||
return -EMSGSIZE;
|
||||
|
||||
copy_rtnl_link_stats(nla_data(attr), stats);
|
||||
|
||||
attr = nla_reserve(skb, IFLA_STATS64,
|
||||
sizeof(struct rtnl_link_stats64));
|
||||
if (!attr)
|
||||
return -EMSGSIZE;
|
||||
|
||||
copy_rtnl_link_stats64(nla_data(attr), stats);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
|
||||
struct net_device *dev,
|
||||
int vfs_num,
|
||||
struct nlattr *vfinfo)
|
||||
{
|
||||
struct ifla_vf_rss_query_en vf_rss_query_en;
|
||||
struct ifla_vf_link_state vf_linkstate;
|
||||
struct ifla_vf_spoofchk vf_spoofchk;
|
||||
struct ifla_vf_tx_rate vf_tx_rate;
|
||||
struct ifla_vf_stats vf_stats;
|
||||
struct ifla_vf_trust vf_trust;
|
||||
struct ifla_vf_vlan vf_vlan;
|
||||
struct ifla_vf_rate vf_rate;
|
||||
struct nlattr *vf, *vfstats;
|
||||
struct ifla_vf_mac vf_mac;
|
||||
struct ifla_vf_info ivi;
|
||||
|
||||
/* Not all SR-IOV capable drivers support the
|
||||
* spoofcheck and "RSS query enable" query. Preset to
|
||||
* -1 so the user space tool can detect that the driver
|
||||
* didn't report anything.
|
||||
*/
|
||||
ivi.spoofchk = -1;
|
||||
ivi.rss_query_en = -1;
|
||||
ivi.trusted = -1;
|
||||
memset(ivi.mac, 0, sizeof(ivi.mac));
|
||||
/* The default value for VF link state is "auto"
|
||||
* IFLA_VF_LINK_STATE_AUTO which equals zero
|
||||
*/
|
||||
ivi.linkstate = 0;
|
||||
if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
|
||||
return 0;
|
||||
|
||||
vf_mac.vf =
|
||||
vf_vlan.vf =
|
||||
vf_rate.vf =
|
||||
vf_tx_rate.vf =
|
||||
vf_spoofchk.vf =
|
||||
vf_linkstate.vf =
|
||||
vf_rss_query_en.vf =
|
||||
vf_trust.vf = ivi.vf;
|
||||
|
||||
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
|
||||
vf_vlan.vlan = ivi.vlan;
|
||||
vf_vlan.qos = ivi.qos;
|
||||
vf_tx_rate.rate = ivi.max_tx_rate;
|
||||
vf_rate.min_tx_rate = ivi.min_tx_rate;
|
||||
vf_rate.max_tx_rate = ivi.max_tx_rate;
|
||||
vf_spoofchk.setting = ivi.spoofchk;
|
||||
vf_linkstate.link_state = ivi.linkstate;
|
||||
vf_rss_query_en.setting = ivi.rss_query_en;
|
||||
vf_trust.setting = ivi.trusted;
|
||||
vf = nla_nest_start(skb, IFLA_VF_INFO);
|
||||
if (!vf) {
|
||||
nla_nest_cancel(skb, vfinfo);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
|
||||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
|
||||
nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
|
||||
&vf_rate) ||
|
||||
nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
|
||||
&vf_tx_rate) ||
|
||||
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
|
||||
&vf_spoofchk) ||
|
||||
nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
|
||||
&vf_linkstate) ||
|
||||
nla_put(skb, IFLA_VF_RSS_QUERY_EN,
|
||||
sizeof(vf_rss_query_en),
|
||||
&vf_rss_query_en) ||
|
||||
nla_put(skb, IFLA_VF_TRUST,
|
||||
sizeof(vf_trust), &vf_trust))
|
||||
return -EMSGSIZE;
|
||||
memset(&vf_stats, 0, sizeof(vf_stats));
|
||||
if (dev->netdev_ops->ndo_get_vf_stats)
|
||||
dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
|
||||
&vf_stats);
|
||||
vfstats = nla_nest_start(skb, IFLA_VF_STATS);
|
||||
if (!vfstats) {
|
||||
nla_nest_cancel(skb, vf);
|
||||
nla_nest_cancel(skb, vfinfo);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
|
||||
vf_stats.rx_packets) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
|
||||
vf_stats.tx_packets) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
|
||||
vf_stats.rx_bytes) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
|
||||
vf_stats.tx_bytes) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
|
||||
vf_stats.broadcast) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
|
||||
vf_stats.multicast))
|
||||
return -EMSGSIZE;
|
||||
nla_nest_end(skb, vfstats);
|
||||
nla_nest_end(skb, vf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct rtnl_link_ifmap map = {
|
||||
.mem_start = dev->mem_start,
|
||||
.mem_end = dev->mem_end,
|
||||
.base_addr = dev->base_addr,
|
||||
.irq = dev->irq,
|
||||
.dma = dev->dma,
|
||||
.port = dev->if_port,
|
||||
};
|
||||
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
|
||||
return -EMSGSIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
|
||||
int type, u32 pid, u32 seq, u32 change,
|
||||
unsigned int flags, u32 ext_filter_mask)
|
||||
{
|
||||
struct ifinfomsg *ifm;
|
||||
struct nlmsghdr *nlh;
|
||||
struct rtnl_link_stats64 temp;
|
||||
const struct rtnl_link_stats64 *stats;
|
||||
struct nlattr *attr, *af_spec;
|
||||
struct nlattr *af_spec;
|
||||
struct rtnl_af_ops *af_ops;
|
||||
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
|
||||
|
||||
@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
|
||||
nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (1) {
|
||||
struct rtnl_link_ifmap map = {
|
||||
.mem_start = dev->mem_start,
|
||||
.mem_end = dev->mem_end,
|
||||
.base_addr = dev->base_addr,
|
||||
.irq = dev->irq,
|
||||
.dma = dev->dma,
|
||||
.port = dev->if_port,
|
||||
};
|
||||
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
|
||||
goto nla_put_failure;
|
||||
}
|
||||
if (rtnl_fill_link_ifmap(skb, dev))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (dev->addr_len) {
|
||||
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
|
||||
@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
|
||||
if (rtnl_phys_switch_id_fill(skb, dev))
|
||||
goto nla_put_failure;
|
||||
|
||||
attr = nla_reserve(skb, IFLA_STATS,
|
||||
sizeof(struct rtnl_link_stats));
|
||||
if (attr == NULL)
|
||||
if (rtnl_fill_stats(skb, dev))
|
||||
goto nla_put_failure;
|
||||
|
||||
stats = dev_get_stats(dev, &temp);
|
||||
copy_rtnl_link_stats(nla_data(attr), stats);
|
||||
|
||||
attr = nla_reserve(skb, IFLA_STATS64,
|
||||
sizeof(struct rtnl_link_stats64));
|
||||
if (attr == NULL)
|
||||
goto nla_put_failure;
|
||||
copy_rtnl_link_stats64(nla_data(attr), stats);
|
||||
|
||||
if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
|
||||
nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
|
||||
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
|
||||
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
|
||||
ext_filter_mask & RTEXT_FILTER_VF) {
|
||||
int i;
|
||||
|
||||
struct nlattr *vfinfo, *vf, *vfstats;
|
||||
struct nlattr *vfinfo;
|
||||
int num_vfs = dev_num_vf(dev->dev.parent);
|
||||
|
||||
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
|
||||
if (!vfinfo)
|
||||
goto nla_put_failure;
|
||||
for (i = 0; i < num_vfs; i++) {
|
||||
struct ifla_vf_info ivi;
|
||||
struct ifla_vf_mac vf_mac;
|
||||
struct ifla_vf_vlan vf_vlan;
|
||||
struct ifla_vf_rate vf_rate;
|
||||
struct ifla_vf_tx_rate vf_tx_rate;
|
||||
struct ifla_vf_spoofchk vf_spoofchk;
|
||||
struct ifla_vf_link_state vf_linkstate;
|
||||
struct ifla_vf_rss_query_en vf_rss_query_en;
|
||||
struct ifla_vf_stats vf_stats;
|
||||
struct ifla_vf_trust vf_trust;
|
||||
|
||||
/*
|
||||
* Not all SR-IOV capable drivers support the
|
||||
* spoofcheck and "RSS query enable" query. Preset to
|
||||
* -1 so the user space tool can detect that the driver
|
||||
* didn't report anything.
|
||||
*/
|
||||
ivi.spoofchk = -1;
|
||||
ivi.rss_query_en = -1;
|
||||
ivi.trusted = -1;
|
||||
memset(ivi.mac, 0, sizeof(ivi.mac));
|
||||
/* The default value for VF link state is "auto"
|
||||
* IFLA_VF_LINK_STATE_AUTO which equals zero
|
||||
*/
|
||||
ivi.linkstate = 0;
|
||||
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
|
||||
break;
|
||||
vf_mac.vf =
|
||||
vf_vlan.vf =
|
||||
vf_rate.vf =
|
||||
vf_tx_rate.vf =
|
||||
vf_spoofchk.vf =
|
||||
vf_linkstate.vf =
|
||||
vf_rss_query_en.vf =
|
||||
vf_trust.vf = ivi.vf;
|
||||
|
||||
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
|
||||
vf_vlan.vlan = ivi.vlan;
|
||||
vf_vlan.qos = ivi.qos;
|
||||
vf_tx_rate.rate = ivi.max_tx_rate;
|
||||
vf_rate.min_tx_rate = ivi.min_tx_rate;
|
||||
vf_rate.max_tx_rate = ivi.max_tx_rate;
|
||||
vf_spoofchk.setting = ivi.spoofchk;
|
||||
vf_linkstate.link_state = ivi.linkstate;
|
||||
vf_rss_query_en.setting = ivi.rss_query_en;
|
||||
vf_trust.setting = ivi.trusted;
|
||||
vf = nla_nest_start(skb, IFLA_VF_INFO);
|
||||
if (!vf) {
|
||||
nla_nest_cancel(skb, vfinfo);
|
||||
if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
|
||||
goto nla_put_failure;
|
||||
}
|
||||
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
|
||||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
|
||||
nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
|
||||
&vf_rate) ||
|
||||
nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
|
||||
&vf_tx_rate) ||
|
||||
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
|
||||
&vf_spoofchk) ||
|
||||
nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
|
||||
&vf_linkstate) ||
|
||||
nla_put(skb, IFLA_VF_RSS_QUERY_EN,
|
||||
sizeof(vf_rss_query_en),
|
||||
&vf_rss_query_en) ||
|
||||
nla_put(skb, IFLA_VF_TRUST,
|
||||
sizeof(vf_trust), &vf_trust))
|
||||
goto nla_put_failure;
|
||||
memset(&vf_stats, 0, sizeof(vf_stats));
|
||||
if (dev->netdev_ops->ndo_get_vf_stats)
|
||||
dev->netdev_ops->ndo_get_vf_stats(dev, i,
|
||||
&vf_stats);
|
||||
vfstats = nla_nest_start(skb, IFLA_VF_STATS);
|
||||
if (!vfstats) {
|
||||
nla_nest_cancel(skb, vf);
|
||||
nla_nest_cancel(skb, vfinfo);
|
||||
goto nla_put_failure;
|
||||
}
|
||||
if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
|
||||
vf_stats.rx_packets) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
|
||||
vf_stats.tx_packets) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
|
||||
vf_stats.rx_bytes) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
|
||||
vf_stats.tx_bytes) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
|
||||
vf_stats.broadcast) ||
|
||||
nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
|
||||
vf_stats.multicast))
|
||||
goto nla_put_failure;
|
||||
nla_nest_end(skb, vfstats);
|
||||
nla_nest_end(skb, vf);
|
||||
}
|
||||
|
||||
nla_nest_end(skb, vfinfo);
|
||||
}
|
||||
|
||||
|
@ -4268,7 +4268,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
return NULL;
}

memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
return skb;
}
|
||||
|
@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
int max_retries, thresh;
u8 defer_accept;

if (sk_listener->sk_state != TCP_LISTEN)
if (sk_state_load(sk_listener) != TCP_LISTEN)
goto drop;

max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
sk->sk_state = TCP_LISTEN;
sk_state_store(sk, TCP_LISTEN);
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
struct net *net = nf_ct_net(ct);
const struct nf_conn *master = ct->master;
struct nf_conntrack_expect *other_exp;
struct nf_conntrack_tuple t;
struct nf_conntrack_tuple t = {};
const struct nf_ct_pptp_master *ct_pptp_info;
const struct nf_nat_pptp *nat_pptp_info;
struct nf_nat_range range;
|
||||
|
@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
|
||||
ip_select_ident(net, skb, NULL);
|
||||
|
||||
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
|
||||
skb->transport_header += iphlen;
|
||||
if (iph->protocol == IPPROTO_ICMP &&
|
||||
length >= iphlen + sizeof(struct icmphdr))
|
||||
icmp_out_count(net, ((struct icmphdr *)
|
||||
skb_transport_header(skb))->type);
|
||||
}
|
||||
if (iph->protocol == IPPROTO_ICMP)
|
||||
icmp_out_count(net, ((struct icmphdr *)
|
||||
skb_transport_header(skb))->type);
|
||||
|
||||
err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
|
||||
net, sk, skb, NULL, rt->dst.dev,
|
||||
|
@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
||||
unsigned int mask;
|
||||
struct sock *sk = sock->sk;
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
int state;
|
||||
|
||||
sock_rps_record_flow(sk);
|
||||
|
||||
sock_poll_wait(file, sk_sleep(sk), wait);
|
||||
if (sk->sk_state == TCP_LISTEN)
|
||||
|
||||
state = sk_state_load(sk);
|
||||
if (state == TCP_LISTEN)
|
||||
return inet_csk_listen_poll(sk);
|
||||
|
||||
/* Socket is not locked. We are protected from async events
|
||||
@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
||||
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
|
||||
* blocking on fresh not-connected or disconnected socket. --ANK
|
||||
*/
|
||||
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
|
||||
if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
|
||||
mask |= POLLHUP;
|
||||
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
||||
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
|
||||
|
||||
/* Connected or passive Fast Open socket? */
|
||||
if (sk->sk_state != TCP_SYN_SENT &&
|
||||
(sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
|
||||
if (state != TCP_SYN_SENT &&
|
||||
(state != TCP_SYN_RECV || tp->fastopen_rsk)) {
|
||||
int target = sock_rcvlowat(sk, 0, INT_MAX);
|
||||
|
||||
if (tp->urg_seq == tp->copied_seq &&
|
||||
@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
||||
tp->urg_data)
|
||||
target++;
|
||||
|
||||
/* Potential race condition. If read of tp below will
|
||||
* escape above sk->sk_state, we can be illegally awaken
|
||||
* in SYN_* states. */
|
||||
if (tp->rcv_nxt - tp->copied_seq >= target)
|
||||
mask |= POLLIN | POLLRDNORM;
|
||||
|
||||
@ -1934,7 +1934,7 @@ void tcp_set_state(struct sock *sk, int state)
|
||||
/* Change state AFTER socket is unhashed to avoid closed
|
||||
* socket sitting in hash tables.
|
||||
*/
|
||||
sk->sk_state = state;
|
||||
sk_state_store(sk, state);
|
||||
|
||||
#ifdef STATE_TRACE
|
||||
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
|
||||
@ -2644,7 +2644,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
|
||||
if (sk->sk_type != SOCK_STREAM)
|
||||
return;
|
||||
|
||||
info->tcpi_state = sk->sk_state;
|
||||
info->tcpi_state = sk_state_load(sk);
|
||||
|
||||
info->tcpi_ca_state = icsk->icsk_ca_state;
|
||||
info->tcpi_retransmits = icsk->icsk_retransmits;
|
||||
info->tcpi_probes = icsk->icsk_probes_out;
|
||||
@ -2672,7 +2673,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
|
||||
info->tcpi_snd_mss = tp->mss_cache;
|
||||
info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
|
||||
|
||||
if (sk->sk_state == TCP_LISTEN) {
|
||||
if (info->tcpi_state == TCP_LISTEN) {
|
||||
info->tcpi_unacked = sk->sk_ack_backlog;
|
||||
info->tcpi_sacked = sk->sk_max_ack_backlog;
|
||||
} else {
|
||||
|
@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
|
||||
{
|
||||
struct tcp_info *info = _info;
|
||||
|
||||
if (sk->sk_state == TCP_LISTEN) {
|
||||
if (sk_state_load(sk) == TCP_LISTEN) {
|
||||
r->idiag_rqueue = sk->sk_ack_backlog;
|
||||
r->idiag_wqueue = sk->sk_max_ack_backlog;
|
||||
} else if (sk->sk_type == SOCK_STREAM) {
|
||||
|
@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
|
||||
__u16 destp = ntohs(inet->inet_dport);
|
||||
__u16 srcp = ntohs(inet->inet_sport);
|
||||
int rx_queue;
|
||||
int state;
|
||||
|
||||
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
|
||||
icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
|
||||
@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
|
||||
timer_expires = jiffies;
|
||||
}
|
||||
|
||||
if (sk->sk_state == TCP_LISTEN)
|
||||
state = sk_state_load(sk);
|
||||
if (state == TCP_LISTEN)
|
||||
rx_queue = sk->sk_ack_backlog;
|
||||
else
|
||||
/*
|
||||
* because we dont lock socket, we might find a transient negative value
|
||||
/* Because we don't lock the socket,
|
||||
* we might find a transient negative value.
|
||||
*/
|
||||
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
|
||||
|
||||
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
|
||||
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
|
||||
i, src, srcp, dest, destp, sk->sk_state,
|
||||
i, src, srcp, dest, destp, state,
|
||||
tp->write_seq - tp->snd_una,
|
||||
rx_queue,
|
||||
timer_active,
|
||||
@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
|
||||
jiffies_to_clock_t(icsk->icsk_ack.ato),
|
||||
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
|
||||
tp->snd_cwnd,
|
||||
sk->sk_state == TCP_LISTEN ?
|
||||
(fastopenq ? fastopenq->max_qlen : 0) :
|
||||
state == TCP_LISTEN ?
|
||||
fastopenq->max_qlen :
|
||||
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
|
||||
}
|
||||
|
||||
|
@ -1651,7 +1651,6 @@ out:
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
} else {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
}
@ -2015,7 +2014,6 @@ out:
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
} else
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

@ -404,6 +404,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
|
||||
}
|
||||
}
|
||||
|
||||
static bool __rt6_check_expired(const struct rt6_info *rt)
|
||||
{
|
||||
if (rt->rt6i_flags & RTF_EXPIRES)
|
||||
return time_after(jiffies, rt->dst.expires);
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool rt6_check_expired(const struct rt6_info *rt)
|
||||
{
|
||||
if (rt->rt6i_flags & RTF_EXPIRES) {
|
||||
@ -1252,7 +1260,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
|
||||
|
||||
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
|
||||
{
|
||||
if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
|
||||
if (!__rt6_check_expired(rt) &&
|
||||
rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
|
||||
rt6_check((struct rt6_info *)(rt->dst.from), cookie))
|
||||
return &rt->dst;
|
||||
else
|
||||
@ -1272,7 +1281,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
|
||||
|
||||
rt6_dst_from_metrics_check(rt);
|
||||
|
||||
if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
|
||||
if (rt->rt6i_flags & RTF_PCPU ||
|
||||
(unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
|
||||
return rt6_dst_from_check(rt, cookie);
|
||||
else
|
||||
return rt6_check(rt, cookie);
|
||||
@ -1322,6 +1332,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
|
||||
rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
|
||||
}
|
||||
|
||||
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
|
||||
{
|
||||
return !(rt->rt6i_flags & RTF_CACHE) &&
|
||||
(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
|
||||
}
|
||||
|
||||
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
||||
const struct ipv6hdr *iph, u32 mtu)
|
||||
{
|
||||
@ -1335,7 +1351,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
||||
if (mtu >= dst_mtu(dst))
|
||||
return;
|
||||
|
||||
if (rt6->rt6i_flags & RTF_CACHE) {
|
||||
if (!rt6_cache_allowed_for_pmtu(rt6)) {
|
||||
rt6_do_update_pmtu(rt6, mtu);
|
||||
} else {
|
||||
const struct in6_addr *daddr, *saddr;
|
||||
|
@ -1690,6 +1690,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
int rx_queue;
int state;

dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
@ -1710,6 +1712,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
timer_expires = jiffies;
}

state = sk_state_load(sp);
if (state == TCP_LISTEN)
rx_queue = sp->sk_ack_backlog;
else
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@ -1718,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
sp->sk_state,
tp->write_seq-tp->snd_una,
(sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
state,
tp->write_seq - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
@ -1732,7 +1743,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
sp->sk_state == TCP_LISTEN ?
state == TCP_LISTEN ?
fastopenq->max_qlen :
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
);

@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV4
select NF_DUP_IPV6 if IP6_NF_IPTABLES
select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
---help---
This option adds a "TEE" target with which a packet can be cloned and
this clone be rerouted to another nexthop.
@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
help
This option adds a `TPROXY' target, which is somewhat similar to
REDIRECT. It can only be used in the mangle table and is useful
@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
help
This option adds a `socket' match, which can be used to match
packets for which a TCP or UDP socket lookup finds a valid socket.

@ -33,7 +33,7 @@
|
||||
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
|
||||
#define mtype MTYPE
|
||||
|
||||
#define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id))
|
||||
#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
|
||||
|
||||
static void
|
||||
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
|
||||
@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
|
||||
del_timer_sync(&map->gc);
|
||||
|
||||
ip_set_free(map->members);
|
||||
if (set->dsize) {
|
||||
if (set->extensions & IPSET_EXT_DESTROY)
|
||||
mtype_ext_cleanup(set);
|
||||
ip_set_free(map->extensions);
|
||||
}
|
||||
kfree(map);
|
||||
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
|
||||
mtype_ext_cleanup(set);
|
||||
ip_set_free(map);
|
||||
|
||||
set->data = NULL;
|
||||
}
|
||||
@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
|
||||
{
|
||||
const struct mtype *map = set->data;
|
||||
struct nlattr *nested;
|
||||
size_t memsize = sizeof(*map) + map->memsize;
|
||||
|
||||
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
|
||||
if (!nested)
|
||||
goto nla_put_failure;
|
||||
if (mtype_do_head(skb, map) ||
|
||||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
|
||||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
|
||||
htonl(sizeof(*map) +
|
||||
map->memsize +
|
||||
set->dsize * map->elements)))
|
||||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
|
||||
goto nla_put_failure;
|
||||
if (unlikely(ip_set_put_flags(skb, set)))
|
||||
goto nla_put_failure;
|
||||
|
@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
|
||||
/* Type structure */
|
||||
struct bitmap_ip {
|
||||
void *members; /* the set members */
|
||||
void *extensions; /* data extensions */
|
||||
u32 first_ip; /* host byte order, included in range */
|
||||
u32 last_ip; /* host byte order, included in range */
|
||||
u32 elements; /* number of max elements in the set */
|
||||
@ -49,6 +48,8 @@ struct bitmap_ip {
|
||||
size_t memsize; /* members size */
|
||||
u8 netmask; /* subnet netmask */
|
||||
struct timer_list gc; /* garbage collection */
|
||||
unsigned char extensions[0] /* data extensions */
|
||||
__aligned(__alignof__(u64));
|
||||
};
|
||||
|
||||
/* ADT structure for generic function args */
|
||||
@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
|
||||
map->members = ip_set_alloc(map->memsize);
|
||||
if (!map->members)
|
||||
return false;
|
||||
if (set->dsize) {
|
||||
map->extensions = ip_set_alloc(set->dsize * elements);
|
||||
if (!map->extensions) {
|
||||
kfree(map->members);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
map->first_ip = first_ip;
|
||||
map->last_ip = last_ip;
|
||||
map->elements = elements;
|
||||
@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
||||
pr_debug("hosts %u, elements %llu\n",
|
||||
hosts, (unsigned long long)elements);
|
||||
|
||||
map = kzalloc(sizeof(*map), GFP_KERNEL);
|
||||
set->dsize = ip_set_elem_len(set, tb, 0, 0);
|
||||
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
|
||||
if (!map)
|
||||
return -ENOMEM;
|
||||
|
||||
map->memsize = bitmap_bytes(0, elements - 1);
|
||||
set->variant = &bitmap_ip;
|
||||
set->dsize = ip_set_elem_len(set, tb, 0);
|
||||
if (!init_map_ip(set, map, first_ip, last_ip,
|
||||
elements, hosts, netmask)) {
|
||||
kfree(map);
|
||||
|
@ -47,24 +47,26 @@ enum {
/* Type structure */
struct bitmap_ipmac {
void *members; /* the set members */
void *extensions; /* MAC + data extensions */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
size_t memsize; /* members size */
struct timer_list gc; /* garbage collector */
unsigned char extensions[0] /* MAC + data extensions */
__aligned(__alignof__(u64));
};

/* ADT structure for generic function args */
struct bitmap_ipmac_adt_elem {
unsigned char ether[ETH_ALEN] __aligned(2);
u16 id;
unsigned char *ether;
u16 add_mac;
};

struct bitmap_ipmac_elem {
unsigned char ether[ETH_ALEN];
unsigned char filled;
} __attribute__ ((aligned));
} __aligned(__alignof__(u64));

static inline u32
ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
return ip - m->first_ip;
}

static inline struct bitmap_ipmac_elem *
get_elem(void *extensions, u16 id, size_t dsize)
{
return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
}
#define get_elem(extensions, id, dsize) \
(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))

#define get_const_elem(extensions, id, dsize) \
(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))

/* Common functions */

@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,

if (!test_bit(e->id, map->members))
return 0;
elem = get_elem(map->extensions, e->id, dsize);
if (elem->filled == MAC_FILLED)
return !e->ether ||
ether_addr_equal(e->ether, elem->ether);
elem = get_const_elem(map->extensions, e->id, dsize);
if (e->add_mac && elem->filled == MAC_FILLED)
return ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
}
@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)

if (!test_bit(id, map->members))
return 0;
elem = get_elem(map->extensions, id, dsize);
elem = get_const_elem(map->extensions, id, dsize);
/* Timer not started for the incomplete elements */
return elem->filled == MAC_FILLED;
}
@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
* and we can reuse it later when MAC is filled out,
* possibly by the kernel
*/
if (e->ether)
if (e->add_mac)
ip_set_timeout_set(timeout, t);
else
*timeout = t;
@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
elem = get_elem(map->extensions, e->id, dsize);
if (test_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->ether &&
if (e->add_mac &&
(flags & IPSET_FLAG_EXIST) &&
!ether_addr_equal(e->ether, elem->ether)) {
/* memcpy isn't atomic */
@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
ether_addr_copy(elem->ether, e->ether);
}
return IPSET_ADD_FAILED;
} else if (!e->ether)
} else if (!e->add_mac)
/* Already added without ethernet address */
return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */
@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->ether) {
} else if (e->add_mac) {
/* We can store MAC too */
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
u32 id, size_t dsize)
{
const struct bitmap_ipmac_elem *elem =
get_elem(map->extensions, id, dsize);
get_const_elem(map->extensions, id, dsize);

return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
{
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = { .id = 0 };
struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;

@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
return -EINVAL;

e.id = ip_to_id(map, ip);
e.ether = eth_hdr(skb)->h_source;
memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);

return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_BITMAP_RANGE;

e.id = ip_to_id(map, ip);
if (tb[IPSET_ATTR_ETHER])
e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
else
e.ether = NULL;

if (tb[IPSET_ATTR_ETHER]) {
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e.add_mac = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);

return ip_set_eexist(ret, flags) ? 0 : ret;
@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
if (set->dsize) {
map->extensions = ip_set_alloc(set->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;

map = kzalloc(sizeof(*map), GFP_KERNEL);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct bitmap_ipmac_elem),
__alignof__(struct bitmap_ipmac_elem));
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;

map->memsize = bitmap_bytes(0, elements - 1);
set->variant = &bitmap_ipmac;
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct bitmap_ipmac_elem));
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);
return -ENOMEM;

@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
|
||||
/* Type structure */
|
||||
struct bitmap_port {
|
||||
void *members; /* the set members */
|
||||
void *extensions; /* data extensions */
|
||||
u16 first_port; /* host byte order, included in range */
|
||||
u16 last_port; /* host byte order, included in range */
|
||||
u32 elements; /* number of max elements in the set */
|
||||
size_t memsize; /* members size */
|
||||
struct timer_list gc; /* garbage collection */
|
||||
unsigned char extensions[0] /* data extensions */
|
||||
__aligned(__alignof__(u64));
|
||||
};
|
||||
|
||||
/* ADT structure for generic function args */
|
||||
@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
|
||||
map->members = ip_set_alloc(map->memsize);
|
||||
if (!map->members)
|
||||
return false;
|
||||
if (set->dsize) {
|
||||
map->extensions = ip_set_alloc(set->dsize * map->elements);
|
||||
if (!map->extensions) {
|
||||
kfree(map->members);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
map->first_port = first_port;
|
||||
map->last_port = last_port;
|
||||
set->timeout = IPSET_NO_TIMEOUT;
|
||||
@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
||||
{
|
||||
struct bitmap_port *map;
|
||||
u16 first_port, last_port;
|
||||
u32 elements;
|
||||
|
||||
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
|
||||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
|
||||
@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
||||
last_port = tmp;
|
||||
}
|
||||
|
||||
map = kzalloc(sizeof(*map), GFP_KERNEL);
|
||||
elements = last_port - first_port + 1;
|
||||
set->dsize = ip_set_elem_len(set, tb, 0, 0);
|
||||
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
|
||||
if (!map)
|
||||
return -ENOMEM;
|
||||
|
||||
map->elements = last_port - first_port + 1;
|
||||
map->elements = elements;
|
||||
map->memsize = bitmap_bytes(0, map->elements);
|
||||
set->variant = &bitmap_port;
|
||||
set->dsize = ip_set_elem_len(set, tb, 0);
|
||||
if (!init_map_port(set, map, first_port, last_port)) {
|
||||
kfree(map);
|
||||
return -ENOMEM;
|
||||
|
@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
}

size_t
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
size_t align)
{
enum ip_set_ext_id id;
size_t offset = len;
u32 cadt_flags = 0;

if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
set->flags |= IPSET_CREATE_FLAG_FORCEADD;
if (!align)
align = 1;
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
offset = ALIGN(offset, ip_set_extensions[id].align);
set->offset[id] = offset;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
offset += ip_set_extensions[id].len;
len += ip_set_extensions[id].len;
}
return offset;
return ALIGN(len, align);
}
EXPORT_SYMBOL_GPL(ip_set_elem_len);

@ -72,8 +72,9 @@ struct hbucket {
DECLARE_BITMAP(used, AHASH_MAX_TUNED);
u8 size; /* size of the array */
u8 pos; /* position of the first free entry */
unsigned char value[0]; /* the array of the values */
} __attribute__ ((aligned));
unsigned char value[0] /* the array of the values */
__aligned(__alignof__(u64));
};

/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
@ -475,7 +476,7 @@ static void
mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
{
struct htable *t;
struct hbucket *n;
struct hbucket *n, *tmp;
struct mtype_elem *data;
u32 i, j, d;
#ifdef IP_SET_HASH_WITH_NETS
@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
}
}
if (d >= AHASH_INIT_SIZE) {
struct hbucket *tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
GFP_ATOMIC);
if (d >= n->size) {
rcu_assign_pointer(hbucket(t, i), NULL);
kfree_rcu(n, rcu);
continue;
}
tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
GFP_ATOMIC);
if (!tmp)
/* Still try to delete expired elements */
continue;
@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
continue;
data = ahash_data(n, j, dsize);
memcpy(tmp->value + d * dsize, data, dsize);
set_bit(j, tmp->used);
set_bit(d, tmp->used);
d++;
}
tmp->pos = d;
@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#endif
set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
} else {
set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
}
#endif
if (tb[IPSET_ATTR_TIMEOUT]) {

@ -31,7 +31,7 @@ struct set_elem {
|
||||
struct rcu_head rcu;
|
||||
struct list_head list;
|
||||
ip_set_id_t id;
|
||||
};
|
||||
} __aligned(__alignof__(u64));
|
||||
|
||||
struct set_adt_elem {
|
||||
ip_set_id_t id;
|
||||
@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
||||
size = IP_SET_LIST_MIN_SIZE;
|
||||
|
||||
set->variant = &set_variant;
|
||||
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
|
||||
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
|
||||
__alignof__(struct set_elem));
|
||||
if (!init_list_set(net, set, size))
|
||||
return -ENOMEM;
|
||||
if (tb[IPSET_ATTR_TIMEOUT]) {
|
||||
|
@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
struct sock *sk;

EnterFunction(11);

@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
if (skb->ipvs_property)
return NF_ACCEPT;

sk = skb_to_full_sk(skb);
/* Bad... Do not break raw sockets */
if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);

if (inet && sk->sk_family == PF_INET && inet->nodefrag)
if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
return NF_ACCEPT;
}

@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
struct ip_vs_conn *cp;
int ret, pkts;
int conn_reuse_mode;
struct sock *sk;

/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
ip_vs_fill_iph_skb(af, skb, false, &iph);

/* Bad... Do not break raw sockets */
if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
sk = skb_to_full_sk(skb);
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);

if (inet && sk->sk_family == PF_INET && inet->nodefrag)
if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
return NF_ACCEPT;
}

@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
struct net *net = sock_net(ctnl);
struct nfnl_log_net *log = nfnl_log_pernet(net);
int ret = 0;
u16 flags;
u16 flags = 0;

if (nfula[NFULA_CFG_CMD]) {
u_int8_t pf = nfmsg->nfgen_family;

@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
|
||||
struct nft_counter *total)
|
||||
{
|
||||
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||
struct nft_counter_percpu *cpu_stats;
|
||||
struct nft_counter total;
|
||||
const struct nft_counter_percpu *cpu_stats;
|
||||
u64 bytes, packets;
|
||||
unsigned int seq;
|
||||
int cpu;
|
||||
|
||||
memset(&total, 0, sizeof(total));
|
||||
memset(total, 0, sizeof(*total));
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_stats = per_cpu_ptr(priv->counter, cpu);
|
||||
cpu_stats = per_cpu_ptr(counter, cpu);
|
||||
do {
|
||||
seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
bytes = cpu_stats->counter.bytes;
|
||||
packets = cpu_stats->counter.packets;
|
||||
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
|
||||
|
||||
total.packets += packets;
|
||||
total.bytes += bytes;
|
||||
total->packets += packets;
|
||||
total->bytes += bytes;
|
||||
}
|
||||
}
|
||||
|
||||
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
{
|
||||
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||
struct nft_counter total;
|
||||
|
||||
nft_counter_fetch(priv->counter, &total);
|
||||
|
||||
if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
|
||||
nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
|
||||
@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
|
||||
free_percpu(priv->counter);
|
||||
}
|
||||
|
||||
static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
|
||||
{
|
||||
struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
|
||||
struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
|
||||
struct nft_counter_percpu __percpu *cpu_stats;
|
||||
struct nft_counter_percpu *this_cpu;
|
||||
struct nft_counter total;
|
||||
|
||||
nft_counter_fetch(priv->counter, &total);
|
||||
|
||||
cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
|
||||
GFP_ATOMIC);
|
||||
if (cpu_stats == NULL)
|
||||
return ENOMEM;
|
||||
|
||||
preempt_disable();
|
||||
this_cpu = this_cpu_ptr(cpu_stats);
|
||||
this_cpu->counter.packets = total.packets;
|
||||
this_cpu->counter.bytes = total.bytes;
|
||||
preempt_enable();
|
||||
|
||||
priv_clone->counter = cpu_stats;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nft_expr_type nft_counter_type;
|
||||
static const struct nft_expr_ops nft_counter_ops = {
|
||||
.type = &nft_counter_type,
|
||||
@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
|
||||
.init = nft_counter_init,
|
||||
.destroy = nft_counter_destroy,
|
||||
.dump = nft_counter_dump,
|
||||
.clone = nft_counter_clone,
|
||||
};
|
||||
|
||||
static struct nft_expr_type nft_counter_type __read_mostly = {
|
||||
|
@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
}

ext = nft_set_elem_ext(set, elem);
if (priv->expr != NULL)
nft_expr_clone(nft_set_ext_expr(ext), priv->expr);
if (priv->expr != NULL &&
nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
return NULL;

return elem;
}

@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
|
||||
kfree_rcu(po->rollover, rcu);
|
||||
}
|
||||
|
||||
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
/* Earlier code assumed this would be a VLAN pkt, double-check
|
||||
* this now that we have the actual packet in hand. We can only
|
||||
* do this check on Ethernet devices.
|
||||
*/
|
||||
if (unlikely(dev->type != ARPHRD_ETHER))
|
||||
return false;
|
||||
|
||||
skb_reset_mac_header(skb);
|
||||
return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
|
||||
}
|
||||
|
||||
static const struct proto_ops packet_ops;
|
||||
|
||||
static const struct proto_ops packet_ops_spkt;
|
||||
@ -1902,18 +1916,10 @@ retry:
|
||||
goto retry;
|
||||
}
|
||||
|
||||
if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
|
||||
/* Earlier code assumed this would be a VLAN pkt,
|
||||
* double-check this now that we have the actual
|
||||
* packet in hand.
|
||||
*/
|
||||
struct ethhdr *ehdr;
|
||||
skb_reset_mac_header(skb);
|
||||
ehdr = eth_hdr(skb);
|
||||
if (ehdr->h_proto != htons(ETH_P_8021Q)) {
|
||||
err = -EMSGSIZE;
|
||||
goto out_unlock;
|
||||
}
|
||||
if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
|
||||
!packet_extra_vlan_len_allowed(dev, skb)) {
|
||||
err = -EMSGSIZE;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
skb->protocol = proto;
|
||||
@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void tpacket_set_protocol(const struct net_device *dev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (dev->type == ARPHRD_ETHER) {
|
||||
skb_reset_mac_header(skb);
|
||||
skb->protocol = eth_hdr(skb)->h_proto;
|
||||
}
|
||||
}
|
||||
|
||||
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
|
||||
void *frame, struct net_device *dev, int size_max,
|
||||
__be16 proto, unsigned char *addr, int hlen)
|
||||
@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
|
||||
skb_reserve(skb, hlen);
|
||||
skb_reset_network_header(skb);
|
||||
|
||||
if (!packet_use_direct_xmit(po))
|
||||
skb_probe_transport_header(skb, 0);
|
||||
if (unlikely(po->tp_tx_has_off)) {
|
||||
int off_min, off_max, off;
|
||||
off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
|
||||
@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
|
||||
dev->hard_header_len);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
if (!skb->protocol)
|
||||
tpacket_set_protocol(dev, skb);
|
||||
|
||||
data += dev->hard_header_len;
|
||||
to_write -= dev->hard_header_len;
|
||||
@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
|
||||
len = ((to_write > len_max) ? len_max : to_write);
|
||||
}
|
||||
|
||||
skb_probe_transport_header(skb, 0);
|
||||
|
||||
return tp_len;
|
||||
}
|
||||
|
||||
@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;

reserve = dev->hard_header_len + VLAN_HLEN;
if (po->sk.sk_socket->type == SOCK_RAW)
reserve = dev->hard_header_len;
size_max = po->tx_ring.frame_size
- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

if (size_max > dev->mtu + reserve)
size_max = dev->mtu + reserve;
if (size_max > dev->mtu + reserve + VLAN_HLEN)
size_max = dev->mtu + reserve + VLAN_HLEN;

do {
ph = packet_current_frame(po, &po->tx_ring,
@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
addr, hlen);
if (likely(tp_len >= 0) &&
tp_len > dev->mtu + dev->hard_header_len) {
struct ethhdr *ehdr;
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
*/
tp_len > dev->mtu + reserve &&
!packet_extra_vlan_len_allowed(dev, skb))
tp_len = -EMSGSIZE;

skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto != htons(ETH_P_8021Q))
tp_len = -EMSGSIZE;
}
if (unlikely(tp_len < 0)) {
if (po->tp_loss) {
__packet_set_status(po, ph,
@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)

sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
*/
struct ethhdr *ehdr;
skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto != htons(ETH_P_8021Q)) {
err = -EMSGSIZE;
goto out_free;
}
if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
goto out_free;
}

skb->protocol = proto;
@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
len += vnet_hdr_len;
}

if (!packet_use_direct_xmit(po))
skb_probe_transport_header(skb, reserve);
skb_probe_transport_header(skb, reserve);

if (unlikely(extra_len == 4))
skb->no_fcs = 1;

@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
(int)(req->tp_block_size -
@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;

rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))

@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
if (!has_sha1)
return -EINVAL;

memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
hmacs->shmac_num_idents * sizeof(__u16));
for (i = 0; i < hmacs->shmac_num_idents; i++)
ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
hmacs->shmac_num_idents * sizeof(__u16));
return 0;

@ -441,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
|
||||
if (state == TCP_LISTEN)
|
||||
unix_release_sock(skb->sk, 1);
|
||||
/* passed fds are erased in the kfree_skb hook */
|
||||
UNIXCB(skb).consumed = skb->len;
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
@ -1799,6 +1800,7 @@ alloc_skb:
|
||||
* this - does no harm
|
||||
*/
|
||||
consume_skb(newskb);
|
||||
newskb = NULL;
|
||||
}
|
||||
|
||||
if (skb_append_pagefrags(skb, page, offset, size)) {
|
||||
@ -1811,8 +1813,11 @@ alloc_skb:
|
||||
skb->truesize += size;
|
||||
atomic_add(size, &sk->sk_wmem_alloc);
|
||||
|
||||
if (newskb)
|
||||
if (newskb) {
|
||||
spin_lock(&other->sk_receive_queue.lock);
|
||||
__skb_queue_tail(&other->sk_receive_queue, newskb);
|
||||
spin_unlock(&other->sk_receive_queue.lock);
|
||||
}
|
||||
|
||||
unix_state_unlock(other);
|
||||
mutex_unlock(&unix_sk(other)->readlock);
|
||||
@ -2072,6 +2077,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
|
||||
|
||||
do {
|
||||
int chunk;
|
||||
bool drop_skb;
|
||||
struct sk_buff *skb, *last;
|
||||
|
||||
unix_state_lock(sk);
|
||||
@ -2152,7 +2158,11 @@ unlock:
|
||||
}
|
||||
|
||||
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
|
||||
skb_get(skb);
|
||||
chunk = state->recv_actor(skb, skip, chunk, state);
|
||||
drop_skb = !unix_skb_len(skb);
|
||||
/* skb is only safe to use if !drop_skb */
|
||||
consume_skb(skb);
|
||||
if (chunk < 0) {
|
||||
if (copied == 0)
|
||||
copied = -EFAULT;
|
||||
@ -2161,6 +2171,18 @@ unlock:
|
||||
copied += chunk;
|
||||
size -= chunk;
|
||||
|
||||
if (drop_skb) {
|
||||
/* the skb was touched by a concurrent reader;
|
||||
* we should not expect anything from this skb
|
||||
* anymore and assume it invalid - we can be
|
||||
* sure it was dropped from the socket queue
|
||||
*
|
||||
* let's report a short read
|
||||
*/
|
||||
err = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Mark read part of skb as used */
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
UNIXCB(skb).consumed += chunk;
|
||||
|
@ -67,10 +67,13 @@ HOSTLOADLIBES_lathist += -lelf
|
||||
# point this to your LLVM backend with bpf support
|
||||
LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
|
||||
|
||||
# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
|
||||
# But, ehere is not easy way to fix it, so just exclude it since it is
|
||||
# useless for BPF samples.
|
||||
$(obj)/%.o: $(src)/%.c
|
||||
clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
|
||||
-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
|
||||
-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
|
||||
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
|
||||
clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
|
||||
-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
|
||||
-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
|
||||
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
|
||||
|
@ -4,6 +4,9 @@ CC = gcc
LEX = flex
YACC = bison

CFLAGS += -Wall -O2
CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include

%.yacc.c: %.y
$(YACC) -o $@ -d $<

@ -12,15 +15,13 @@ YACC = bison

all : bpf_jit_disasm bpf_dbg bpf_asm

bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
bpf_jit_disasm : bpf_jit_disasm.o

bpf_dbg : CFLAGS = -Wall -O2
bpf_dbg : LDLIBS = -lreadline
bpf_dbg : bpf_dbg.o

bpf_asm : CFLAGS = -Wall -O2 -I.
bpf_asm : LDLIBS =
bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
bpf_exp.lex.o : bpf_exp.yacc.c