Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix BPF handling of branch offset adjustments on backjumps, from
    Daniel Borkmann.

 2) Make sure selinux knows about SOCK_DESTROY netlink messages, from
    Lorenzo Colitti.

 3) Fix openvswitch tunnel MTU regression, from David Wragg.

 4) Fix ICMP handling of TCP sockets in syn_recv state, from Eric
    Dumazet.

 5) Fix SCTP user hmacid byte ordering bug, from Xin Long.

 6) Fix recursive locking in ipv6 addrconf, from Subash Abhinov
    Kasiviswanathan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  bpf: fix branch offset adjustment on backjumps after patching ctx expansion
  vxlan, gre, geneve: Set a large MTU on ovs-created tunnel devices
  geneve: Relax MTU constraints
  vxlan: Relax MTU constraints
  flow_dissector: Fix unaligned access in __skb_flow_dissector when used by eth_get_headlen
  of: of_mdio: Add marvell,88e1145 to whitelist of PHY compatibilities.
  selinux: nlmsgtab: add SOCK_DESTROY to the netlink mapping tables
  sctp: translate network order to host order when users get a hmacid
  enic: increment devcmd2 result ring in case of timeout
  tg3: Fix for tg3 transmit queue 0 timed out when too many gso_segs
  net:Add sysctl_max_skb_frags
  tcp: do not drop syn_recv on all icmp reports
  ipv6: fix a lockdep splat
  unix: correctly track in-flight fds in sending process user_struct
  update be2net maintainers' email addresses
  dwc_eth_qos: Reset hardware before PHY start
  ipv6: addrconf: Fix recursive spin lock call
commit 5de6ac75d9
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -9787,10 +9787,11 @@ S: Supported
 F: drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M: Sathya Perla <sathya.perla@avagotech.com>
-M: Ajit Khaparde <ajit.khaparde@avagotech.com>
-M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M: Sathya Perla <sathya.perla@broadcom.com>
+M: Ajit Khaparde <ajit.khaparde@broadcom.com>
+M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M: Somnath Kotur <somnath.kotur@broadcom.com>
 L: netdev@vger.kernel.org
 W: http://www.emulex.com
 S: Supported
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+	/* Check if we will never have enough descriptors,
+	 * as gso_segs can be more than current ring size
+	 */
+	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * vlan encapsulated.
 		 */
 		if (skb->protocol == htons(ETH_P_8021Q) ||
-		    skb->protocol == htons(ETH_P_8021AD))
-			return tg3_tso_bug(tp, tnapi, txq, skb);
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (tg3_tso_bug_gso_check(tnapi, skb))
+				return tg3_tso_bug(tp, tnapi, txq, skb);
+			goto drop;
+		}
 
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, tnapi, txq, skb);
-
+			    tg3_flag(tp, TSO_BUG)) {
+				if (tg3_tso_bug_gso_check(tnapi, skb))
+					return tg3_tso_bug(tp, tnapi, txq, skb);
+				goto drop;
+			}
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
 			iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-		if (mss) {
+		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
 			/* If it's a TSO packet, do GSO instead of
 			 * allocating and copying to a large linear SKB
 			 */
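The tg3 hunks turn an unconditional "fall back to GSO" into "fall back only if the segments can ever fit, else drop". A minimal standalone model of that decision (illustrative names and numbers, not driver code):

/* A TSO packet that would expand into more GSO segments than the ring
 * can ever drain cannot succeed by segmenting and requeueing; it must
 * be dropped up front, or the netdev watchdog eventually fires with
 * "transmit queue 0 timed out".
 */
enum tx_action { TX_SEGMENT_VIA_GSO, TX_DROP };

static enum tx_action tso_workaround(unsigned int gso_segs,
				     unsigned int tx_pending)
{
	/* tx_pending / 3 mirrors tg3_tso_bug_gso_check()'s budget */
	return gso_segs < tx_pending / 3 ? TX_SEGMENT_VIA_GSO : TX_DROP;
}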
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.3.0.12"
+#define DRV_VERSION		"2.3.0.20"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
 			  int wait)
 {
 	struct devcmd2_controller *dc2c = vdev->devcmd2;
-	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	struct devcmd2_result *result;
+	u8 color;
 	unsigned int i;
 	int delay, err;
 	u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
 	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
 		return 0;
 
+	result = dc2c->result + dc2c->next_result;
+	color = dc2c->color;
+
+	dc2c->next_result++;
+	if (dc2c->next_result == dc2c->result_size) {
+		dc2c->next_result = 0;
+		dc2c->color = dc2c->color ? 0 : 1;
+	}
+
 	for (delay = 0; delay < wait; delay++) {
-		if (result->color == dc2c->color) {
-			dc2c->next_result++;
-			if (dc2c->next_result == dc2c->result_size) {
-				dc2c->next_result = 0;
-				dc2c->color = dc2c->color ? 0 : 1;
-			}
+		if (result->color == color) {
 			if (result->error) {
 				err = result->error;
 				if (err != ERR_ECMDUNKNOWN ||
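Why the vnic_dev.c reordering matters: the old code advanced next_result (and flipped the expected color) only inside the polling loop, so a command that timed out left the consumer index stale and every later command read the wrong result slot. A minimal sketch of a color-bit result ring that advances before polling (illustrative types and names, not driver code):

struct result_slot { unsigned char color; int value; };

struct result_ring {
	struct result_slot *slots;
	unsigned int size;
	unsigned int next;	/* next slot to consume */
	unsigned char color;	/* color the producer will write there */
};

static int ring_wait_result(struct result_ring *r, int spins, int *value)
{
	struct result_slot *slot = &r->slots[r->next];
	unsigned char color = r->color;
	int i;

	/* Advance immediately: even on timeout we stay in step with
	 * the hardware, which is exactly what the fix does for devcmd2.
	 */
	if (++r->next == r->size) {
		r->next = 0;
		r->color ^= 1;	/* expected color flips on each wrap */
	}

	for (i = 0; i < spins; i++) {
		if (slot->color == color) {	/* producer published it */
			*value = slot->value;
			return 0;
		}
	}
	return -1;	/* timeout, but ring state is still consistent */
}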
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev)
 	}
 	netdev_reset_queue(ndev);
 
+	dwceqos_init_hw(lp);
 	napi_enable(&lp->napi);
 	phy_start(lp->phy_dev);
-	dwceqos_init_hw(lp);
 
 	netif_start_queue(ndev);
 	tasklet_enable(&lp->tx_bdreclaim_tasklet);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
@@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 	return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+	/* GENEVE overhead is not fixed, so we can't enforce a more
+	 * precise max MTU.
+	 */
+	if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = {
 	.ndo_stop		= geneve_stop,
 	.ndo_start_xmit		= geneve_xmit,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
-	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_change_mtu		= geneve_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
@@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 		return dev;
 
 	err = geneve_configure(net, dev, &geneve_remote_unspec,
 			       0, 0, 0, htons(dst_port), true, 0);
-	if (err) {
-		free_netdev(dev);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err;
+
+	/* openvswitch users expect packet sizes to be unrestricted,
+	 * so set the largest MTU we can.
+	 */
+	err = geneve_change_mtu(dev, IP_MAX_MTU);
+	if (err)
+		goto err;
+
 	return dev;
+
+ err:
+	free_netdev(dev);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
@@ -2367,27 +2367,41 @@ static void vxlan_set_multicast_list(struct net_device *dev)
 {
 }
 
+static int __vxlan_change_mtu(struct net_device *dev,
+			      struct net_device *lowerdev,
+			      struct vxlan_rdst *dst, int new_mtu, bool strict)
+{
+	int max_mtu = IP_MAX_MTU;
+
+	if (lowerdev)
+		max_mtu = lowerdev->mtu;
+
+	if (dst->remote_ip.sa.sa_family == AF_INET6)
+		max_mtu -= VXLAN6_HEADROOM;
+	else
+		max_mtu -= VXLAN_HEADROOM;
+
+	if (new_mtu < 68)
+		return -EINVAL;
+
+	if (new_mtu > max_mtu) {
+		if (strict)
+			return -EINVAL;
+
+		new_mtu = max_mtu;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
-	struct net_device *lowerdev;
-	int max_mtu;
-
-	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-	if (lowerdev == NULL)
-		return eth_change_mtu(dev, new_mtu);
-
-	if (dst->remote_ip.sa.sa_family == AF_INET6)
-		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
-	else
-		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
-
-	if (new_mtu < 68 || new_mtu > max_mtu)
-		return -EINVAL;
-
-	dev->mtu = new_mtu;
-	return 0;
+	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+							 dst->remote_ifindex);
+	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
@@ -2765,6 +2779,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	int err;
 	bool use_ipv6 = false;
 	__be16 default_port = vxlan->cfg.dst_port;
+	struct net_device *lowerdev = NULL;
 
 	vxlan->net = src_net;
 
@@ -2785,9 +2800,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	}
 
 	if (conf->remote_ifindex) {
-		struct net_device *lowerdev
-			 = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
 		dst->remote_ifindex = conf->remote_ifindex;
 
 		if (!lowerdev) {
@@ -2811,6 +2824,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 		needed_headroom = lowerdev->hard_header_len;
 	}
 
+	if (conf->mtu) {
+		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+		if (err)
+			return err;
+	}
+
 	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
 		needed_headroom += VXLAN6_HEADROOM;
 	else
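The headroom arithmetic behind __vxlan_change_mtu(), written out (the constants below mirror the driver's definitions, outer IP + UDP + VXLAN header + inner Ethernet; treat this as an illustrative sketch, not driver code):

#define VXLAN_HEADROOM	(20 + 8 + 8 + 14)	/* IPv4 underlay: 50 bytes */
#define VXLAN6_HEADROOM	(40 + 8 + 8 + 14)	/* IPv6 underlay: 70 bytes */

static int vxlan_max_inner_mtu(int lowerdev_mtu, int ipv6_underlay)
{
	/* e.g. a 1500-byte IPv4 underlay leaves 1500 - 50 = 1450 */
	return lowerdev_mtu - (ipv6_underlay ? VXLAN6_HEADROOM
					     : VXLAN_HEADROOM);
}

With strict=false (used at configure time and, via the openvswitch change further below that passes .mtu = IP_MAX_MTU), an oversized request is clamped to this ceiling instead of rejected, which is what un-breaks ovs-created tunnels.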
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
@@ -154,6 +154,7 @@ static const struct of_device_id whitelist_phys[] = {
 	{ .compatible = "marvell,88E1111", },
 	{ .compatible = "marvell,88e1116", },
 	{ .compatible = "marvell,88e1118", },
+	{ .compatible = "marvell,88e1145", },
 	{ .compatible = "marvell,88e1149r", },
 	{ .compatible = "marvell,88e1310", },
 	{ .compatible = "marvell,88E1510", },
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
@@ -299,6 +299,7 @@ struct sk_buff;
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 		    u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
 struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
diff --git a/include/net/scm.h b/include/net/scm.h
@@ -21,6 +21,7 @@ struct scm_creds {
 struct scm_fp_list {
 	short			count;
 	short			max;
+	struct user_struct	*user;
 	struct file		*fp[SCM_MAX_FD];
 };
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -447,7 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
 		/* adjust offset of jmps if necessary */
 		if (i < pos && i + insn->off + 1 > pos)
 			insn->off += delta;
-		else if (i > pos && i + insn->off + 1 < pos)
+		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
 			insn->off -= delta;
 	}
 }
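A standalone check of the backjump arithmetic the fix corrects (toy numbers, not kernel code): one instruction at pos is expanded by delta, every later instruction shifts down, and a jump's target is index i + off + 1.

#include <assert.h>

int main(void)
{
	int pos = 2, delta = 2;	/* insn 2 was expanded into 3 insns   */
	int i = 5 + delta;	/* backjump source: old index 5, now 7 */
	int off = -5;		/* old target: 5 + (-5) + 1 == insn 1  */

	/* the old test never fires here: 7 > 2, but 3 < 2 is false,
	 * so the offset stayed -5 and the jump landed at 7 - 5 + 1 = 3,
	 * i.e. inside the rewritten block
	 */
	assert(!(i > pos && i + off + 1 < pos));

	/* the fixed test catches it and rescales the offset */
	if (i > pos + delta && i + off + 1 <= pos + delta)
		off -= delta;
	assert(i + off + 1 == 1);	/* target preserved */
	return 0;
}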
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
@@ -208,7 +208,6 @@ ip:
 	case htons(ETH_P_IPV6): {
 		const struct ipv6hdr *iph;
 		struct ipv6hdr _iph;
-		__be32 flow_label;
 
 ipv6:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
 			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 		}
 
-		flow_label = ip6_flowlabel(iph);
-		if (flow_label) {
+		if ((dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+		    ip6_flowlabel(iph)) {
+			__be32 flow_label = ip6_flowlabel(iph);
+
 			if (dissector_uses_key(flow_dissector,
 					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
 				key_tags = skb_flow_dissector_target(flow_dissector,
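Background on the unaligned access: ip6_flowlabel() masks the first 32-bit word of the IPv6 header, roughly *(__be32 *)hdr & IPV6_FLOWLABEL_MASK, and eth_get_headlen() can hand __skb_flow_dissect() a driver buffer with no 4-byte alignment guarantee, which traps on strict-alignment CPUs. The fix simply skips the load unless a caller asked for the flow label. The usual portable alternative, shown for contrast (illustrative, not what the patch does):

#include <stdint.h>
#include <string.h>

static uint32_t load_be32_unaligned(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* compiler emits byte loads where required */
	return v;
}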
diff --git a/net/core/scm.c b/net/core/scm.c
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 		*fplp = fpl;
 		fpl->count = 0;
 		fpl->max = SCM_MAX_FD;
+		fpl->user = NULL;
 	}
 	fpp = &fpl->fp[fpl->count];
 
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 		*fpp++ = file;
 		fpl->count++;
 	}
+
+	if (!fpl->user)
+		fpl->user = get_uid(current_user());
+
 	return num;
 }
 
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
 		scm->fp = NULL;
 		for (i=fpl->count-1; i>=0; i--)
 			fput(fpl->fp[i]);
+		free_uid(fpl->user);
 		kfree(fpl);
 	}
 }
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
 		for (i = 0; i < fpl->count; i++)
 			get_file(fpl->fp[i]);
 		new_fpl->max = new_fpl->count;
+		new_fpl->user = get_uid(fpl->user);
 	}
 	return new_fpl;
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
@@ -79,6 +79,8 @@
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *	skb_panic - private function for out-of-line support
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
@@ -26,6 +26,7 @@ static int zero = 0;
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "max_skb_frags",
+		.data		= &sysctl_max_skb_frags,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &max_skb_frags,
+	},
 	{ }
 };
 
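Once this table entry is registered, the knob appears as net.core.max_skb_frags, bounded to [1, MAX_SKB_FRAGS] by extra1/extra2, and the TCP sendpath hunks further below consult it in place of the compile-time constant. A hypothetical userspace probe (not part of the patch):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/max_skb_frags", "r");
	int v;

	if (f && fscanf(f, "%d", &v) == 1)
		printf("max_skb_frags = %d\n", v);
	if (f)
		fclose(f);
	return 0;
}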
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
@@ -1240,6 +1240,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 	err = ipgre_newlink(net, dev, tb, NULL);
 	if (err < 0)
 		goto out;
+
+	/* openvswitch users expect packet sizes to be unrestricted,
+	 * so set the largest MTU we can.
+	 */
+	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+	if (err)
+		goto out;
+
 	return dev;
 out:
 	free_netdev(dev);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
@@ -943,17 +943,31 @@ done:
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
 
-	if (new_mtu < 68 ||
-	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+	if (new_mtu < 68)
 		return -EINVAL;
+
+	if (new_mtu > max_mtu) {
+		if (strict)
+			return -EINVAL;
+
+		new_mtu = max_mtu;
+	}
+
 	dev->mtu = new_mtu;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+	return __ip_tunnel_change_mtu(dev, new_mtu, true);
+}
 EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 
 static void ip_tunnel_dev_free(struct net_device *dev)
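What the 0xFFF8 ceiling means (illustrative restatement, not new kernel code): the IP total-length field tops out at 65535 and fragment offsets are counted in 8-byte units, so 0xFFF8 (65528) is the largest cleanly fragmentable size, with the tunnel and link headers paid out of that budget.

static int ip_tunnel_max_mtu(int hard_header_len, int t_hlen)
{
	return 0xFFF8 - hard_header_len - t_hlen;	/* 65528 - overhead */
}

As with the vxlan change, strict=false lets configure-time callers (here gretap_fb_dev_create() above) clamp to this ceiling rather than fail.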
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
@@ -940,7 +940,7 @@ new_segment:
 
 		i = skb_shinfo(skb)->nr_frags;
 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+		if (!can_coalesce && i >= sysctl_max_skb_frags) {
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
@@ -1213,7 +1213,7 @@ new_segment:
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i == MAX_SKB_FRAGS || !sg) {
+				if (i == sysctl_max_skb_frags || !sg) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
 	struct request_sock *req = inet_reqsk(sk);
 	struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
 	if (seq != tcp_rsk(req)->snt_isn) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-	} else {
+	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
 		 * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	}
 	seq = ntohl(th->seq);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq,
+				   type == ICMP_PARAMETERPROB ||
+				   type == ICMP_TIME_EXCEEDED ||
+				   (type == ICMP_DEST_UNREACH &&
+				    (code == ICMP_NET_UNREACH ||
+				     code == ICMP_HOST_UNREACH)));
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
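The predicate passed as the new abort argument, pulled out as a hypothetical helper for readability (the patch writes it inline): only ICMP errors that are genuinely fatal for a not-yet-accepted connection kill the request socket, so transient soft errors no longer drop every SYN_RECV they happen to hit.

#include <linux/icmp.h>
#include <stdbool.h>

static bool icmp_fatal_for_synrecv(int type, int code)
{
	return type == ICMP_PARAMETERPROB ||
	       type == ICMP_TIME_EXCEEDED ||
	       (type == ICMP_DEST_UNREACH &&
		(code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}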
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
@@ -3538,6 +3538,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct net_device *dev = idev->dev;
+	bool notify = false;
 
 	addrconf_join_solict(dev, &ifp->addr);
 
@@ -3583,7 +3584,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 		/* Because optimistic nodes can use this address,
 		 * notify listeners. If DAD fails, RTM_DELADDR is sent.
 		 */
-		ipv6_ifa_notify(RTM_NEWADDR, ifp);
+		notify = true;
 	}
 }
 
@@ -3591,6 +3592,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 out:
 	spin_unlock(&ifp->lock);
 	read_unlock_bh(&idev->lock);
+	if (notify)
+		ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
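The shape of the recursive-lock fix, as a generic sketch (hypothetical names, pthread standing in for the kernel locks): ipv6_ifa_notify() can take idev->lock again, so the decision is recorded under the lock and the notification fired only after both locks are released.

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t lock;
	bool dirty;
};

static void send_notification(struct obj *o) { (void)o; /* may relock o */ }

static void update_and_notify(struct obj *o)
{
	bool notify = false;

	pthread_mutex_lock(&o->lock);
	if (o->dirty) {
		o->dirty = false;
		notify = true;		/* record intent only */
	}
	pthread_mutex_unlock(&o->lock);

	if (notify)
		send_notification(o);	/* safe: lock no longer held */
}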
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 		}
 		spin_lock_bh(&ip6_sk_fl_lock);
 		for (sflp = &np->ipv6_fl_list;
-		     (sfl = rcu_dereference(*sflp)) != NULL;
+		     (sfl = rcu_dereference_protected(*sflp,
+						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
 		     sflp = &sfl->next) {
 			if (sfl->fl->label == freq.flr_label) {
 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-				*sflp = rcu_dereference(sfl->next);
+				*sflp = sfl->next;
 				spin_unlock_bh(&ip6_sk_fl_lock);
 				fl_release(sfl->fl);
 				kfree_rcu(sfl, rcu);
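The lockdep splat came from using a reader primitive in writer context: rcu_dereference() asserts an RCU read-side critical section, but this list walk runs under ip6_sk_fl_lock. A kernel-style fragment of the convention the fix applies (sketch, not standalone code):

/* lockless reader */
rcu_read_lock();
sfl = rcu_dereference(np->ipv6_fl_list);
rcu_read_unlock();

/* an updater already serialized by the spinlock documents that fact
 * instead of pretending to be an RCU reader, and may use a plain
 * store (*sflp = sfl->next) for pointers it owns
 */
spin_lock_bh(&ip6_sk_fl_lock);
sfl = rcu_dereference_protected(np->ipv6_fl_list,
				lockdep_is_held(&ip6_sk_fl_lock));
spin_unlock_bh(&ip6_sk_fl_lock);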
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
@@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct tcp_sock *tp;
 	__u32 seq, snd_una;
 	struct sock *sk;
+	bool fatal;
 	int err;
 
 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		return;
 	}
 	seq = ntohl(th->seq);
+	fatal = icmpv6_err_convert(type, code, &err);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq, fatal);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			goto out;
 		}
 
-	icmpv6_err_convert(type, code, &err);
 
 	/* Might be for an request_sock */
 	switch (sk->sk_state) {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 	struct vxlan_config conf = {
 		.no_share = true,
 		.flags = VXLAN_F_COLLECT_METADATA,
+		/* Don't restrict the packets that can be sent by MTU */
+		.mtu = IP_MAX_MTU,
 	};
 
 	if (!options) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
@@ -5538,6 +5538,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 	struct sctp_hmac_algo_param *hmacs;
 	__u16 data_len = 0;
 	u32 num_idents;
+	int i;
 
 	if (!ep->auth_enable)
 		return -EACCES;
@@ -5555,8 +5556,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 		return -EFAULT;
 	if (put_user(num_idents, &p->shmac_num_idents))
 		return -EFAULT;
-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-		return -EFAULT;
+	for (i = 0; i < num_idents; i++) {
+		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+			return -EFAULT;
+	}
 	return 0;
 }
 
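A worked example of the byte-order bug (assumes a little-endian host): the kernel stores hmac_ids[] in network order, so HMAC-SHA1's identifier 1 sits in memory as htons(1); copying it raw, as the old code did, made getsockopt(SCTP_HMAC_IDENT) report 256.

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	unsigned short wire = htons(1);	/* as stored in hmacs->hmac_ids[] */

	printf("raw copy: %u\n", (unsigned)wire);		/* 256 on little-endian */
	printf("ntohs() : %u\n", (unsigned)ntohs(wire));	/* 1 everywhere */
	return 0;
}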
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_notinflight(scm->fp->fp[i]);
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 		return -ENOMEM;
 
 	for (i = scm->fp->count - 1; i >= 0; i--)
-		unix_inflight(scm->fp->fp[i]);
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
 	return max_level;
 }
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
 		}
 		unix_tot_inflight++;
 	}
-	fp->f_cred->user->unix_inflight++;
+	user->unix_inflight++;
 	spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
 		list_del_init(&u->link);
 		unix_tot_inflight--;
 	}
-	fp->f_cred->user->unix_inflight--;
+	user->unix_inflight--;
 	spin_unlock(&unix_gc_lock);
 }
 
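The invariant these unix hunks establish, sketched with hypothetical types: the user to charge is captured once per SCM_RIGHTS message at send time (scm_fp_copy() above) and travels with the fd list, so unix_inflight()/unix_notinflight() always touch the same counter, instead of re-deriving it from fp->f_cred, which names whoever opened the file rather than whoever is sending it.

struct user_acct { long unix_inflight; };

struct fd_list {
	struct user_acct *sender;	/* fixed when the message is built */
	int count;
};

static void charge(struct fd_list *l)   { l->sender->unix_inflight += l->count; }
static void uncharge(struct fd_list *l) { l->sender->unix_inflight -= l->count; }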
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
@@ -83,6 +83,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
 	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ SOCK_DIAG_BY_FAMILY,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DESTROY,		NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =