Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor overlapping changes in net/ipv4/ipmr.c: in 'net' we were fixing the "BH-ness" of the counter bumps, whilst in 'net-next' the functions were modified to take an explicit 'net' parameter.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 73186df8d7
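For clarity, the resolved form of the overlapping lines, as it appears in the net/ipv4/ipmr.c hunk further down: the merge keeps net-next's explicit 'net' parameter and takes the non-BH counter macros from the 'net' fix.

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);	/* was IP_INC_STATS_BH */
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);	/* was IP_ADD_STATS_BH */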
@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
 	is->compflags = 0;
 
 	is->reset = isdn_ppp_ccp_reset_alloc(is);
+	if (!is->reset)
+		return -ENOMEM;
 
 	is->lp = NULL;
 	is->mp_seqno = 0;	/* MP sequence number */
@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
 	 * VJ header compression init
 	 */
 	is->slcomp = slhc_init(16, 16);	/* not necessary for 2. link in bundle */
+	if (IS_ERR(is->slcomp)) {
+		isdn_ppp_ccp_reset_free(is);
+		return PTR_ERR(is->slcomp);
+	}
#endif
#ifdef CONFIG_IPPP_FILTER
 	is->pass_filter = NULL;
@@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 		is->maxcid = val;
#ifdef CONFIG_ISDN_PPP_VJ
 		sltmp = slhc_init(16, val);
-		if (!sltmp) {
-			printk(KERN_ERR "ippp, can't realloc slhc struct\n");
-			return -ENOMEM;
-		}
+		if (IS_ERR(sltmp))
+			return PTR_ERR(sltmp);
 		if (is->slcomp)
 			slhc_free(is->slcomp);
 		is->slcomp = sltmp;
@@ -915,7 +915,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
 		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
 
-	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
 	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
 	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
 	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
@@ -1055,8 +1055,10 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 	}
 
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	if (mode == GENET_POWER_PASSIVE)
+	if (mode == GENET_POWER_PASSIVE) {
 		bcmgenet_phy_power_set(priv->dev, true);
+		bcmgenet_mii_reset(priv->dev);
+	}
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
@@ -673,6 +673,7 @@ int bcmgenet_mii_init(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev);
 int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
@@ -163,6 +163,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
 		phy_print_status(phydev);
 }
 
+
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
 					  struct fixed_phy_status *status)
 {
@@ -172,6 +173,22 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
 	return 0;
 }
 
+/* Perform a voluntary PHY software reset, since the EPHY is very finicky about
+ * not doing it and will start corrupting packets
+ */
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	if (GENET_IS_V4(priv))
+		return;
+
+	if (priv->phydev) {
+		phy_init_hw(priv->phydev);
+		phy_start_aneg(priv->phydev);
+	}
+}
+
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -214,6 +231,7 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
 	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 	reg |= EXT_PWR_DN_EN_LD;
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+	bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -3261,7 +3261,7 @@ static void fec_reset_phy(struct platform_device *pdev)
 		return;
 	}
 	msleep(msec);
-	gpio_set_value(phy_reset, 1);
+	gpio_set_value_cansleep(phy_reset, 1);
 }
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
@@ -1212,15 +1212,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
 		mdp->rx_buf_sz += NET_IP_ALIGN;
 
 	/* Allocate RX and TX skb rings */
-	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
-				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
+	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
+				 GFP_KERNEL);
 	if (!mdp->rx_skbuff) {
 		ret = -ENOMEM;
 		return ret;
 	}
 
-	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
-				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
+	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
+				 GFP_KERNEL);
 	if (!mdp->tx_skbuff) {
 		ret = -ENOMEM;
 		goto skb_ring_free;
@@ -1232,7 +1232,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 				      GFP_KERNEL);
 	if (!mdp->rx_ring) {
 		ret = -ENOMEM;
-		goto desc_ring_free;
+		goto skb_ring_free;
 	}
 
 	mdp->dirty_rx = 0;
@@ -1855,7 +1855,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
 	unsigned int write_ptr;
 	efx_qword_t *txd;
 
-	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+	tx_queue->xmit_more_available = false;
+	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+		return;
 
 	do {
 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
@@ -321,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
 	unsigned write_ptr;
 	unsigned old_write_count = tx_queue->write_count;
 
-	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+	tx_queue->xmit_more_available = false;
+	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+		return;
 
 	do {
 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
@@ -219,6 +219,7 @@ struct efx_tx_buffer {
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -253,6 +254,7 @@ struct efx_tx_queue {
 	unsigned int tso_packets;
 	unsigned int pushes;
 	unsigned int pio_packets;
+	bool xmit_more_available;
 	/* Statistics to supplement MAC stats */
 	unsigned long tx_packets;
 
@@ -431,8 +431,20 @@ finish_packet:
 	efx_tx_maybe_stop_queue(tx_queue);
 
 	/* Pass off to hardware */
-	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+		/* There could be packets left on the partner queue if those
+		 * SKBs had skb->xmit_more set. If we do not push those they
+		 * could be left for a long time and cause a netdev watchdog.
+		 */
+		if (txq2->xmit_more_available)
+			efx_nic_push_buffers(txq2);
+
 		efx_nic_push_buffers(tx_queue);
+	} else {
+		tx_queue->xmit_more_available = skb->xmit_more;
+	}
 
 	tx_queue->tx_packets++;
 
@@ -722,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+	tx_queue->xmit_more_available = false;
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -747,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 		++tx_queue->read_count;
 	}
+	tx_queue->xmit_more_available = false;
 	netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
@@ -1302,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	efx_tx_maybe_stop_queue(tx_queue);
 
 	/* Pass off to hardware */
-	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+		/* There could be packets left on the partner queue if those
+		 * SKBs had skb->xmit_more set. If we do not push those they
+		 * could be left for a long time and cause a netdev watchdog.
+		 */
+		if (txq2->xmit_more_available)
+			efx_nic_push_buffers(txq2);
+
 		efx_nic_push_buffers(tx_queue);
+	} else {
+		tx_queue->xmit_more_available = skb->xmit_more;
+	}
 
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
@@ -1052,6 +1052,7 @@ static int smsc911x_mii_probe(struct net_device *dev)
#ifdef USE_PHY_WORK_AROUND
 	if (smsc911x_phy_loopbacktest(dev) < 0) {
 		SMSC_WARN(pdata, hw, "Failed Loop Back Test");
+		phy_disconnect(phydev);
 		return -ENODEV;
 	}
 	SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
 
-		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
 					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
 					SOF_TIMESTAMPING_RAW_HARDWARE;
 
 		if (priv->ptp_clock)
@@ -205,6 +205,37 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 }
 EXPORT_SYMBOL(phy_device_create);
 
+/* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers.
+ * @bus: the target MII bus
+ * @addr: PHY address on the MII bus
+ * @dev_addr: MMD address in the PHY.
+ * @devices_in_package: where to store the devices in package information.
+ *
+ * Description: reads devices in package registers of a MMD at @dev_addr
+ * from PHY at @addr on @bus.
+ *
+ * Returns: 0 on success, -EIO on failure.
+ */
+static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
+				   u32 *devices_in_package)
+{
+	int phy_reg, reg_addr;
+
+	reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2;
+	phy_reg = mdiobus_read(bus, addr, reg_addr);
+	if (phy_reg < 0)
+		return -EIO;
+	*devices_in_package = (phy_reg & 0xffff) << 16;
+
+	reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1;
+	phy_reg = mdiobus_read(bus, addr, reg_addr);
+	if (phy_reg < 0)
+		return -EIO;
+	*devices_in_package |= (phy_reg & 0xffff);
+
+	return 0;
+}
+
 /**
 * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
 * @bus: the target MII bus
@@ -223,38 +254,31 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
 	int phy_reg;
 	int i, reg_addr;
 	const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
+	u32 *devs = &c45_ids->devices_in_package;
 
-	/* Find first non-zero Devices In package. Device
-	 * zero is reserved, so don't probe it.
+	/* Find first non-zero Devices In package. Device zero is reserved
+	 * for 802.3 c45 complied PHYs, so don't probe it at first.
 	 */
-	for (i = 1;
-	     i < num_ids && c45_ids->devices_in_package == 0;
-	     i++) {
-retry:		reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2;
-		phy_reg = mdiobus_read(bus, addr, reg_addr);
+	for (i = 1; i < num_ids && *devs == 0; i++) {
+		phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs);
 		if (phy_reg < 0)
 			return -EIO;
-		c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
-
-		reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1;
-		phy_reg = mdiobus_read(bus, addr, reg_addr);
-		if (phy_reg < 0)
-			return -EIO;
-		c45_ids->devices_in_package |= (phy_reg & 0xffff);
 
-		if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
-			if (i) {
-				/* If mostly Fs, there is no device there,
-				 * then let's continue to probe more, as some
-				 * 10G PHYs have zero Devices In package,
-				 * e.g. Cortina CS4315/CS4340 PHY.
-				 */
-				i = 0;
-				goto retry;
-			} else {
-				/* no device there, let's get out of here */
+		if ((*devs & 0x1fffffff) == 0x1fffffff) {
+			/* If mostly Fs, there is no device there,
+			 * then let's continue to probe more, as some
+			 * 10G PHYs have zero Devices In package,
+			 * e.g. Cortina CS4315/CS4340 PHY.
+			 */
+			phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs);
+			if (phy_reg < 0)
+				return -EIO;
+			/* no device there, let's get out of here */
+			if ((*devs & 0x1fffffff) == 0x1fffffff) {
 				*phy_id = 0xffffffff;
 				return 0;
+			} else {
+				break;
 			}
 		}
 	}
@@ -721,10 +721,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			val &= 0xffff;
 		}
 		vj = slhc_init(val2+1, val+1);
-		if (!vj) {
-			netdev_err(ppp->dev,
-				   "PPP: no memory (VJ compressor)\n");
-			err = -ENOMEM;
+		if (IS_ERR(vj)) {
+			err = PTR_ERR(vj);
 			break;
 		}
 		ppp_lock(ppp);
@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
static unsigned char * put16(unsigned char *cp, unsigned short x);
static unsigned short pull16(unsigned char **cpp);
 
-/* Initialize compression data structure
+/* Allocate compression data structure
 * slots must be in range 0 to 255 (zero meaning no compression)
+ * Returns pointer to structure or ERR_PTR() on error.
 */
struct slcompress *
slhc_init(int rslots, int tslots)
@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
 	register struct cstate *ts;
 	struct slcompress *comp;
 
+	if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
+		return ERR_PTR(-EINVAL);
+
 	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
 	if (! comp)
 		goto out_fail;
 
-	if ( rslots > 0 && rslots < 256 ) {
+	if (rslots > 0) {
 		size_t rsize = rslots * sizeof(struct cstate);
 		comp->rstate = kzalloc(rsize, GFP_KERNEL);
 		if (! comp->rstate)
@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
 		comp->rslot_limit = rslots - 1;
 	}
 
-	if ( tslots > 0 && tslots < 256 ) {
+	if (tslots > 0) {
 		size_t tsize = tslots * sizeof(struct cstate);
 		comp->tstate = kzalloc(tsize, GFP_KERNEL);
 		if (! comp->tstate)
@@ -141,7 +145,7 @@ out_free2:
out_free:
 	kfree(comp);
out_fail:
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 
@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 	if (cbuff == NULL)
 		goto err_exit;
 	slcomp = slhc_init(16, 16);
-	if (slcomp == NULL)
+	if (IS_ERR(slcomp))
 		goto err_exit;
#endif
 	spin_lock_bh(&sl->lock);
@@ -485,6 +485,10 @@ static const struct usb_device_id products[] = {
 					      USB_CDC_PROTO_NONE),
 		.driver_info = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+		.driver_info = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -737,7 +741,6 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
-	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
@@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
 
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
-	sock_release(sk->sk_socket);
+	if (sk)
+		sock_release(sk->sk_socket);
}
 
#endif
@@ -317,7 +317,7 @@ void fib_flush_external(struct net *net);
 
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
-int fib_sync_down_dev(struct net_device *dev, unsigned long event);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net *net, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 
@@ -1112,9 +1112,10 @@ static void nl_fib_lookup_exit(struct net *net)
 	net->ipv4.fibnl = NULL;
 }
 
-static void fib_disable_ip(struct net_device *dev, unsigned long event)
+static void fib_disable_ip(struct net_device *dev, unsigned long event,
+			   bool force)
 {
-	if (fib_sync_down_dev(dev, event))
+	if (fib_sync_down_dev(dev, event, force))
 		fib_flush(dev_net(dev));
 	rt_cache_flush(dev_net(dev));
 	arp_ifdown(dev);
@@ -1142,7 +1143,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 			/* Last address was deleted from this interface.
 			 * Disable IP.
 			 */
-			fib_disable_ip(dev, event);
+			fib_disable_ip(dev, event, true);
 		} else {
 			rt_cache_flush(dev_net(dev));
 		}
@@ -1159,7 +1160,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 	unsigned int flags;
 
 	if (event == NETDEV_UNREGISTER) {
-		fib_disable_ip(dev, event);
+		fib_disable_ip(dev, event, true);
 		rt_flush_dev(dev);
 		return NOTIFY_DONE;
 	}
@@ -1180,14 +1181,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 		rt_cache_flush(net);
 		break;
 	case NETDEV_DOWN:
-		fib_disable_ip(dev, event);
+		fib_disable_ip(dev, event, false);
 		break;
 	case NETDEV_CHANGE:
 		flags = dev_get_flags(dev);
 		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
 			fib_sync_up(dev, RTNH_F_LINKDOWN);
 		else
-			fib_sync_down_dev(dev, event);
+			fib_sync_down_dev(dev, event, false);
 		/* fall through */
 	case NETDEV_CHANGEMTU:
 		rt_cache_flush(net);
@@ -1343,7 +1343,13 @@ int fib_sync_down_addr(struct net *net, __be32 local)
 	return ret;
 }
 
-int fib_sync_down_dev(struct net_device *dev, unsigned long event)
+/* Event              force Flags           Description
+ * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
+ * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
+ * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
+ * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
+ */
+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
 {
 	int ret = 0;
 	int scope = RT_SCOPE_NOWHERE;
@@ -1352,8 +1358,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 	struct hlist_head *head = &fib_info_devhash[hash];
 	struct fib_nh *nh;
 
-	if (event == NETDEV_UNREGISTER ||
-	    event == NETDEV_DOWN)
+	if (force)
 		scope = -1;
 
 	hlist_for_each_entry(nh, head, nh_hash) {
@@ -1498,6 +1503,13 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
 	if (!(dev->flags & IFF_UP))
 		return 0;
 
+	if (nh_flags & RTNH_F_DEAD) {
+		unsigned int flags = dev_get_flags(dev);
+
+		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+			nh_flags |= RTNH_F_LINKDOWN;
+	}
+
 	prev_fi = NULL;
 	hash = fib_devindex_hashfn(dev->ifindex);
 	head = &fib_info_devhash[hash];
@@ -1683,8 +1683,8 @@ static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
@@ -1746,7 +1746,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 		 * to blackhole.
 		 */
 
-		IP_INC_STATS_BH(net, IPSTATS_MIB_FRAGFAILS);
+		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 		ip_rt_put(rt);
 		goto out_free;
 	}
@@ -2080,7 +2080,6 @@ static int ip6_route_del(struct fib6_config *cfg)
 
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
-	struct net *net = dev_net(skb->dev);
 	struct netevent_redirect netevent;
 	struct rt6_info *rt, *nrt = NULL;
 	struct ndisc_options ndopts;
@@ -2141,7 +2140,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 	}
 
 	rt = (struct rt6_info *) dst;
-	if (rt == net->ipv6.ip6_null_entry) {
+	if (rt->rt6i_flags & RTF_REJECT) {
 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
 		return;
 	}
@@ -1394,34 +1394,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
 	return 0;
 }
 
-static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct iphdr *iph = &tunnel->parms.iph;
 	struct net *net = dev_net(dev);
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
-	tunnel->dev = dev;
-	tunnel->net = dev_net(dev);
-
 	iph->version = 4;
 	iph->protocol = IPPROTO_IPV6;
 	iph->ihl = 5;
 	iph->ttl = 64;
 
-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!dev->tstats)
-		return -ENOMEM;
-
-	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
-	if (!tunnel->dst_cache) {
-		free_percpu(dev->tstats);
-		return -ENOMEM;
-	}
-
 	dev_hold(dev);
 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
-	return 0;
 }
 
static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1831,23 +1817,19 @@ static int __net_init sit_init_net(struct net *net)
 	 */
 	sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
 
-	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
-	if (err)
-		goto err_dev_free;
-
-	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
 	err = register_netdev(sitn->fb_tunnel_dev);
 	if (err)
 		goto err_reg_dev;
 
+	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
+	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+
 	t = netdev_priv(sitn->fb_tunnel_dev);
 
 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
 	return 0;
 
err_reg_dev:
-	dev_put(sitn->fb_tunnel_dev);
-err_dev_free:
 	ipip6_dev_free(sitn->fb_tunnel_dev);
err_alloc_dev:
 	return err;
@@ -144,6 +144,16 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			break;
}
 
+static void tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			 u8 type, u8 code, int offset, __be32 info)
+{
+	struct xfrm6_tunnel *handler;
+
+	for_each_tunnel_rcu(tunnel46_handlers, handler)
+		if (!handler->err_handler(skb, opt, type, code, offset, info))
+			break;
+}
+
static const struct inet6_protocol tunnel6_protocol = {
 	.handler = tunnel6_rcv,
 	.err_handler = tunnel6_err,
@@ -152,7 +162,7 @@ static const struct inet6_protocol tunnel6_protocol = {
 
static const struct inet6_protocol tunnel46_protocol = {
 	.handler = tunnel46_rcv,
-	.err_handler = tunnel6_err,
+	.err_handler = tunnel46_err,
 	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
 
@@ -48,6 +48,7 @@
#include <linux/tipc_netlink.h>
#include "core.h"
#include "bearer.h"
+#include "msg.h"
 
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT	6118
@@ -220,6 +221,10 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
{
 	struct udp_bearer *ub;
 	struct tipc_bearer *b;
+	int usr = msg_user(buf_msg(skb));
+
+	if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR))
+		skb_linearize(skb);
 
 	ub = rcu_dereference_sk_user_data(sk);
 	if (!ub) {