Merge tag 'net-6.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter, wireless and bluetooth.

  Nothing major; the regression fixes are mostly in drivers, and two
  more of those are flowing towards us through various trees. I wish
  some of the changes had gone into -rc5; we'll try to keep an eye on
  the frequency of PRs from sub-trees. Also, a disproportionate number
  of fixes are for bugs added in v6.4 -- strange coincidence.

  Current release - regressions:

   - igc: fix LED-related deadlock on driver unbind

   - wifi: mac80211: small fixes to the recent clean-up of the
     connection process

   - Revert "wifi: iwlwifi: bump FW API to 90 for BZ/SC devices"; the
     kernel doesn't have all the code to deal with that version yet

   - Bluetooth:
      - set power_ctrl_enabled on NULL returned by gpiod_get_optional()
      - qca: fix invalid device address check, again

   - eth: ravb: fix registered interrupt names

  Current release - new code bugs:

   - wifi: mac80211: check EHT/TTLM action frame length

  Previous releases - regressions:

   - fix sk_memory_allocated_{add|sub} for architectures where
     __this_cpu_{add|sub}* are not IRQ-safe

   - dsa: mv88e6xx: fix link setup for 88E6250

  Previous releases - always broken:

   - ip: validate dev returned from __in_dev_get_rcu(), preventing
     possible null-derefs in a few places

   - switch a number of for_each_rcu() loops that use call_rcu() on the
     iterator to for_each_safe() (a sketch of this pattern follows the
     commit list below)

   - macsec: fix isolation of broadcast traffic in presence of offload

   - vxlan: drop packets from invalid source address

   - eth: mlxsw: trap and ACL programming fixes

   - eth: bnxt: PCIe error recovery fixes, fix counting dropped packets

   - Bluetooth:
      - lots of fixes for the command submission rework from v6.4
      - qca: fix NULL-deref on non-serdev suspend

  Misc:

   - tools: ynl: don't ignore errors in NLMSG_DONE messages"

* tag 'net-6.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
  af_unix: Suppress false-positive lockdep splat for spin_lock() in __unix_gc().
  net: b44: set pause params only when interface is up
  tls: fix lockless read of strp->msg_ready in ->poll
  dpll: fix dpll_pin_on_pin_register() for multiple parent pins
  net: ravb: Fix registered interrupt names
  octeontx2-af: fix the double free in rvu_npc_freemem()
  net: ethernet: ti: am65-cpts: Fix PTPv1 message type on TX packets
  ice: fix LAG and VF lock dependency in ice_reset_vf()
  iavf: Fix TC config comparison with existing adapter TC config
  i40e: Report MFS in decimal base instead of hex
  i40e: Do not use WQ_MEM_RECLAIM flag for workqueue
  net: ti: icssg-prueth: Fix signedness bug in prueth_init_rx_chns()
  net/mlx5e: Advertise mlx5 ethernet driver updates sk_buff md_dst for MACsec
  macsec: Detect if Rx skb is macsec-related for offloading devices that update md_dst
  ethernet: Add helper for assigning packet type when dest address does not match device address
  macsec: Enable devices to advertise whether they update sk_buff md_dst during offloads
  net: phy: dp83869: Fix MII mode failure
  netfilter: nf_tables: honor table dormant flag from netdev release event path
  eth: bnxt: fix counting packets discarded due to OOM and netpoll
  igc: Fix LED-related deadlock on driver unbind
  ...
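The for_each_rcu()/call_rcu() bullet above describes a pattern rather than a single patch, so a quick illustration may help. The sketch below is hypothetical code in the shape of the bug, not taken from any of the 88 commits: walking an hlist with the _rcu iterator while call_rcu() is queued on the current node is only safe under rcu_read_lock(); on the updater side the grace period may end and free the node before the iterator reads its next pointer, hence the conversion to the _safe variant, which caches the next pointer first.

#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_entry {			/* hypothetical structure */
	struct hlist_node node;
	struct rcu_head rcu;
};

static void demo_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

/* Buggy shape: on the updater side (no rcu_read_lock()) nothing prevents
 * the grace period from ending, so demo_entry_free() may free 'e' before
 * the iterator dereferences e->node.next to advance.
 */
static void demo_flush_buggy(struct hlist_head *head)
{
	struct demo_entry *e;

	hlist_for_each_entry_rcu(e, head, node) {
		hlist_del_rcu(&e->node);
		call_rcu(&e->rcu, demo_entry_free);
	}
}

/* Fixed shape: the _safe iterator loads the next pointer before the loop
 * body runs, so queueing 'e' for freeing cannot poison the traversal.
 */
static void demo_flush_safe(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct demo_entry *e;

	hlist_for_each_entry_safe(e, tmp, head, node) {
		hlist_del_rcu(&e->node);
		call_rcu(&e->rcu, demo_entry_free);
	}
}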
commit 52afb15e9d

 .mailmap | 10 ++++++++++
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -38,6 +38,16 @@ Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
+Alex Elder <elder@kernel.org>
+Alex Elder <elder@kernel.org> <aelder@sgi.com>
+Alex Elder <elder@kernel.org> <alex.elder@linaro.org>
+Alex Elder <elder@kernel.org> <alex.elder@linary.org>
+Alex Elder <elder@kernel.org> <elder@dreamhost.com>
+Alex Elder <elder@kernel.org> <elder@dreawmhost.com>
+Alex Elder <elder@kernel.org> <elder@ieee.org>
+Alex Elder <elder@kernel.org> <elder@inktank.com>
+Alex Elder <elder@kernel.org> <elder@linaro.org>
+Alex Elder <elder@kernel.org> <elder@newdream.net>
 Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
 Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7829,9 +7829,8 @@ W:	http://aeschi.ch.eu.org/efs/
 F:	fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Douglas Miller <dougmill@linux.ibm.com>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/ibm/ehea/
 
 ELM327 CAN NETWORK DRIVER
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 	switch (data->cd_info.state) {
 	case HCI_DEVCOREDUMP_IDLE:
 		err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
-		if (err < 0)
+		if (err < 0) {
+			kfree_skb(skb);
 			break;
+		}
 		data->cd_info.cnt = 0;
 
 		/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 		break;
 	}
 
-	if (err < 0)
-		kfree_skb(skb);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(btmtk_process_coredump);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -15,6 +15,8 @@
 
 #define VERSION "0.1"
 
+#define QCA_BDADDR_DEFAULT (&(bdaddr_t) {{ 0xad, 0x5a, 0x00, 0x00, 0x00, 0x00 }})
+
 int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
			 enum qca_btsoc_type soc_type)
 {
@@ -612,6 +614,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
 
+static int qca_check_bdaddr(struct hci_dev *hdev)
+{
+	struct hci_rp_read_bd_addr *bda;
+	struct sk_buff *skb;
+	int err;
+
+	if (bacmp(&hdev->public_addr, BDADDR_ANY))
+		return 0;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(hdev, "Failed to read device address (%d)", err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*bda)) {
+		bt_dev_err(hdev, "Device address length mismatch");
+		kfree_skb(skb);
+		return -EIO;
+	}
+
+	bda = (struct hci_rp_read_bd_addr *)skb->data;
+	if (!bacmp(&bda->bdaddr, QCA_BDADDR_DEFAULT))
+		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
 static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
				      struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
 {
@@ -818,6 +852,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
		break;
	}
 
+	err = qca_check_bdaddr(hdev);
+	if (err)
+		return err;
+
 	bt_dev_info(hdev, "QCA setup on UART is completed");
 
 	return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
	/* Realtek 8852BE Bluetooth devices */
	{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
						     BTUSB_WIDEBAND_SPEECH },
+	{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void btusb_coredump_qca(struct hci_dev *hdev)
 {
+	int err;
	static const u8 param[] = { 0x26 };
-	struct sk_buff *skb;
 
-	skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
-	if (IS_ERR(skb))
-		bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
-	kfree_skb(skb);
+	err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+	if (err < 0)
+		bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
 }
 
 /*
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
	struct hci_uart *hu = hci_get_drvdata(hdev);
	bool wakeup;
 
+	if (!hu->serdev)
+		return true;
+
	/* BT SoC attached through the serial bus is handled by the serdev driver.
	 * So we need to use the device handle of the serdev driver to get the
	 * status of device may wakeup.
@@ -1905,8 +1908,6 @@ retry:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
-		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->bdaddr_property_broken)
			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
@@ -1957,8 +1958,10 @@ retry:
		qca_debugfs_init(hdev);
		hu->hdev->hw_error = qca_hw_error;
		hu->hdev->cmd_timeout = qca_cmd_timeout;
-		if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
-			hu->hdev->wakeup = qca_wakeup;
+		if (hu->serdev) {
+			if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+				hu->hdev->wakeup = qca_wakeup;
+		}
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		set_bit(QCA_ROM_FW, &qca->flags);
@@ -2329,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
		    (data->soc_type == QCA_WCN6750 ||
		     data->soc_type == QCA_WCN6855)) {
			dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
-			power_ctrl_enabled = false;
+			return PTR_ERR(qcadev->bt_en);
		}
 
+		if (!qcadev->bt_en)
+			power_ctrl_enabled = false;
+
		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
							  GPIOD_IN);
		if (IS_ERR(qcadev->sw_ctrl) &&
		    (data->soc_type == QCA_WCN6750 ||
		     data->soc_type == QCA_WCN6855 ||
-		     data->soc_type == QCA_WCN7850))
-			dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+		     data->soc_type == QCA_WCN7850)) {
+			dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+			return PTR_ERR(qcadev->sw_ctrl);
+		}
 
		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
		if (IS_ERR(qcadev->susclk)) {
@@ -2357,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
							GPIOD_OUT_LOW);
		if (IS_ERR(qcadev->bt_en)) {
-			dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
-			power_ctrl_enabled = false;
+			dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+			return PTR_ERR(qcadev->bt_en);
		}
 
+		if (!qcadev->bt_en)
+			power_ctrl_enabled = false;
+
		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
		if (IS_ERR(qcadev->susclk)) {
			dev_warn(&serdev->dev, "failed to acquire clk\n");
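A note on the hci_qca probe hunks above: devm_gpiod_get_optional() has three distinct outcomes, and the fix stops collapsing them into one. A stand-alone sketch of the handling, using a hypothetical probe helper rather than the driver's actual code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* ERR_PTR(err): real failure (e.g. bad firmware data) -> propagate the error
 * NULL:         GPIO simply not described             -> no power control
 * valid desc:   GPIO available                        -> power control on
 */
static int demo_get_bt_enable_gpio(struct device *dev, bool *power_ctrl_enabled)
{
	struct gpio_desc *bt_en;

	bt_en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(bt_en))
		return PTR_ERR(bt_en);

	*power_ctrl_enabled = bt_en != NULL;
	return 0;
}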
diff --git a/drivers/dpll/core.c b/drivers/dpll/core.c
--- a/drivers/dpll/core.c
+++ b/drivers/dpll/core.c
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
	struct list_head list;
	const struct dpll_pin_ops *ops;
	void *priv;
+	void *cookie;
 };
 
 struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
 
 static struct dpll_pin_registration *
 dpll_pin_registration_find(struct dpll_pin_ref *ref,
-			   const struct dpll_pin_ops *ops, void *priv)
+			   const struct dpll_pin_ops *ops, void *priv,
+			   void *cookie)
 {
	struct dpll_pin_registration *reg;
 
	list_for_each_entry(reg, &ref->registration_list, list) {
-		if (reg->ops == ops && reg->priv == priv)
+		if (reg->ops == ops && reg->priv == priv &&
+		    reg->cookie == cookie)
			return reg;
	}
	return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
 
 static int
 dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
-		    const struct dpll_pin_ops *ops, void *priv)
+		    const struct dpll_pin_ops *ops, void *priv,
+		    void *cookie)
 {
	struct dpll_pin_registration *reg;
	struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
	xa_for_each(xa_pins, i, ref) {
		if (ref->pin != pin)
			continue;
-		reg = dpll_pin_registration_find(ref, ops, priv);
+		reg = dpll_pin_registration_find(ref, ops, priv, cookie);
		if (reg) {
			refcount_inc(&ref->refcount);
			return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
	}
	reg->ops = ops;
	reg->priv = priv;
+	reg->cookie = cookie;
	if (ref_exists)
		refcount_inc(&ref->refcount);
	list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
 }
 
 static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
-			       const struct dpll_pin_ops *ops, void *priv)
+			       const struct dpll_pin_ops *ops, void *priv,
+			       void *cookie)
 {
	struct dpll_pin_registration *reg;
	struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
	xa_for_each(xa_pins, i, ref) {
		if (ref->pin != pin)
			continue;
-		reg = dpll_pin_registration_find(ref, ops, priv);
+		reg = dpll_pin_registration_find(ref, ops, priv, cookie);
		if (WARN_ON(!reg))
			return -EINVAL;
		list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
 
 static int
 dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
-		     const struct dpll_pin_ops *ops, void *priv)
+		     const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
	struct dpll_pin_registration *reg;
	struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
	xa_for_each(xa_dplls, i, ref) {
		if (ref->dpll != dpll)
			continue;
-		reg = dpll_pin_registration_find(ref, ops, priv);
+		reg = dpll_pin_registration_find(ref, ops, priv, cookie);
		if (reg) {
			refcount_inc(&ref->refcount);
			return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
	}
	reg->ops = ops;
	reg->priv = priv;
+	reg->cookie = cookie;
	if (ref_exists)
		refcount_inc(&ref->refcount);
	list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
 
 static void
 dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
-		     const struct dpll_pin_ops *ops, void *priv)
+		     const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
	struct dpll_pin_registration *reg;
	struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
	xa_for_each(xa_dplls, i, ref) {
		if (ref->dpll != dpll)
			continue;
-		reg = dpll_pin_registration_find(ref, ops, priv);
+		reg = dpll_pin_registration_find(ref, ops, priv, cookie);
		if (WARN_ON(!reg))
			return;
		list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
 
 static int
 __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
-		    const struct dpll_pin_ops *ops, void *priv)
+		    const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
	int ret;
 
-	ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+	ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
	if (ret)
		return ret;
-	ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+	ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
	if (ret)
		goto ref_pin_del;
	xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
	return ret;
 
 ref_pin_del:
-	dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+	dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
	return ret;
 }
 
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
		      dpll->clock_id == pin->clock_id)))
		ret = -EINVAL;
	else
-		ret = __dpll_pin_register(dpll, pin, ops, priv);
+		ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
	mutex_unlock(&dpll_lock);
 
	return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
 
 static void
 __dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
-		      const struct dpll_pin_ops *ops, void *priv)
+		      const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
	ASSERT_DPLL_PIN_REGISTERED(pin);
-	dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
-	dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+	dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+	dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
	if (xa_empty(&pin->dpll_refs))
		xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
 }
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
 
	mutex_lock(&dpll_lock);
	dpll_pin_delete_ntf(pin);
-	__dpll_pin_unregister(dpll, pin, ops, priv);
+	__dpll_pin_unregister(dpll, pin, ops, priv, NULL);
	mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
		return -EINVAL;
 
	mutex_lock(&dpll_lock);
-	ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+	ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
	if (ret)
		goto unlock;
	refcount_inc(&pin->refcount);
	xa_for_each(&parent->dpll_refs, i, ref) {
-		ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+		ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
		if (ret) {
			stop = i;
			goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
 dpll_unregister:
	xa_for_each(&parent->dpll_refs, i, ref)
		if (i < stop) {
-			__dpll_pin_unregister(ref->dpll, pin, ops, priv);
+			__dpll_pin_unregister(ref->dpll, pin, ops, priv,
+					      parent);
			dpll_pin_delete_ntf(pin);
		}
	refcount_dec(&pin->refcount);
-	dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+	dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
 unlock:
	mutex_unlock(&dpll_lock);
	return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
 
	mutex_lock(&dpll_lock);
	dpll_pin_delete_ntf(pin);
-	dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+	dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
	refcount_dec(&pin->refcount);
	xa_for_each(&pin->dpll_refs, i, ref)
-		__dpll_pin_unregister(ref->dpll, pin, ops, priv);
+		__dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
	mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
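The dpll hunks above all serve one idea: registration entries are now keyed by (ops, priv, cookie) instead of (ops, priv), with the cookie being NULL for a pin registered directly on a dpll and the parent pin for a pin-on-pin registration. Two parents registering the same pin with the same ops/priv therefore no longer collapse into a single refcounted entry. A reduced sketch of the keying, with hypothetical types rather than the kernel structures:

#include <stdbool.h>
#include <stddef.h>

struct demo_registration {
	const void *ops;
	void *priv;
	void *cookie;	/* NULL: direct pin; parent pin: pin-on-pin path */
};

/* All three keys must match, so the same (ops, priv) registered through
 * two different parent pins yields two distinct registrations, and
 * unregistering one parent cannot tear down the other's entry.
 */
static bool demo_registration_matches(const struct demo_registration *reg,
				      const void *ops, void *priv, void *cookie)
{
	return reg->ops == ops && reg->priv == priv && reg->cookie == cookie;
}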
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
		phy_interface_set_rgmii(supported);
 }
 
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+				     struct phylink_config *config)
+{
+	unsigned long *supported = config->supported_interfaces;
+	int err;
+	u16 reg;
+
+	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+	if (err) {
+		dev_err(chip->dev, "p%d: failed to read port status\n", port);
+		return;
+	}
+
+	switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+	case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+		__set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+		break;
+
+	case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+	case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+		__set_bit(PHY_INTERFACE_MODE_MII, supported);
+		break;
+
+	case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+	case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+		__set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+		break;
+
+	case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+	case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+		__set_bit(PHY_INTERFACE_MODE_RMII, supported);
+		break;
+
+	case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+		__set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+		break;
+
+	default:
+		dev_err(chip->dev,
+			"p%d: invalid port mode in status register: %04x\n",
+			port, reg);
+	}
+}
+
 static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
				       struct phylink_config *config)
 {
	unsigned long *supported = config->supported_interfaces;
 
	/* Translate the default cmode */
	mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
 
+	if (!mv88e6xxx_phy_is_internal(chip, port))
+		mv88e6250_setup_supported_interfaces(chip, port, config);
+
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
 }
|
||||
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
|
||||
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
|
||||
/* - Modes with PHY suffix use output instead of input clock
|
||||
* - Modes without RMII or RGMII use MII
|
||||
* - Modes without speed do not have a fixed speed specified in the manual
|
||||
* ("DC to x MHz" - variable clock support?)
|
||||
*/
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
|
||||
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
|
||||
#define MV88E6XXX_PORT_STS_LINK 0x0800
|
||||
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
|
||||
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
|
||||
|
@ -436,10 +436,8 @@ static void umac_init(struct bcmasp_intf *intf)
|
||||
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
|
||||
}
|
||||
|
||||
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
|
||||
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
|
||||
{
|
||||
struct bcmasp_intf *intf =
|
||||
container_of(napi, struct bcmasp_intf, tx_napi);
|
||||
struct bcmasp_intf_stats64 *stats = &intf->stats64;
|
||||
struct device *kdev = &intf->parent->pdev->dev;
|
||||
unsigned long read, released = 0;
|
||||
@ -482,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
|
||||
DESC_RING_COUNT);
|
||||
}
|
||||
|
||||
/* Ensure all descriptors have been written to DRAM for the hardware
|
||||
* to see updated contents.
|
||||
*/
|
||||
wmb();
|
||||
return released;
|
||||
}
|
||||
|
||||
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct bcmasp_intf *intf =
|
||||
container_of(napi, struct bcmasp_intf, tx_napi);
|
||||
int released = 0;
|
||||
|
||||
released = bcmasp_tx_reclaim(intf);
|
||||
|
||||
napi_complete(&intf->tx_napi);
|
||||
|
||||
@ -797,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
|
||||
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
|
||||
intf->tx_spb_index = 0;
|
||||
intf->tx_spb_clean_index = 0;
|
||||
memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
|
||||
|
||||
/* Make sure channels are disabled */
|
||||
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
|
||||
@ -885,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
|
||||
} while (timeout-- > 0);
|
||||
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
|
||||
|
||||
bcmasp_tx_reclaim(intf);
|
||||
|
||||
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
|
||||
|
||||
phy_stop(dev->phydev);
|
||||
|
@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
|
||||
bp->flags |= B44_FLAG_TX_PAUSE;
|
||||
else
|
||||
bp->flags &= ~B44_FLAG_TX_PAUSE;
|
||||
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
|
||||
b44_halt(bp);
|
||||
b44_init_rings(bp);
|
||||
b44_init_hw(bp, B44_FULL_RESET);
|
||||
} else {
|
||||
__b44_set_flow_ctrl(bp, bp->flags);
|
||||
if (netif_running(dev)) {
|
||||
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
|
||||
b44_halt(bp);
|
||||
b44_init_rings(bp);
|
||||
b44_init_hw(bp, B44_FULL_RESET);
|
||||
} else {
|
||||
__b44_set_flow_ctrl(bp, bp->flags);
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&bp->lock);
|
||||
|
||||
|
@ -1778,7 +1778,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
|
||||
if (!skb) {
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
@ -1788,7 +1788,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
|
||||
if (!new_data) {
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1804,7 +1804,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
if (!skb) {
|
||||
skb_free_frag(data);
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
skb_reserve(skb, bp->rx_offset);
|
||||
@ -1815,7 +1815,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
|
||||
if (!skb) {
|
||||
/* Page reuse already handled by bnxt_rx_pages(). */
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
@ -2094,11 +2094,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
||||
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
|
||||
cp_cons, agg_bufs,
|
||||
false);
|
||||
if (!frag_len) {
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
if (!frag_len)
|
||||
goto oom_next_rx;
|
||||
}
|
||||
xdp_active = true;
|
||||
}
|
||||
@ -2121,9 +2118,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
||||
else
|
||||
bnxt_xdp_buff_frags_free(rxr, &xdp);
|
||||
}
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
goto oom_next_rx;
|
||||
}
|
||||
} else {
|
||||
u32 payload;
|
||||
@ -2134,29 +2129,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
||||
payload = 0;
|
||||
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
|
||||
payload | len);
|
||||
if (!skb) {
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
if (!skb)
|
||||
goto oom_next_rx;
|
||||
}
|
||||
|
||||
if (agg_bufs) {
|
||||
if (!xdp_active) {
|
||||
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
|
||||
if (!skb) {
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
if (!skb)
|
||||
goto oom_next_rx;
|
||||
} else {
|
||||
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
|
||||
if (!skb) {
|
||||
/* we should be able to free the old skb here */
|
||||
bnxt_xdp_buff_frags_free(rxr, &xdp);
|
||||
cpr->sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
goto oom_next_rx;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2234,6 +2221,11 @@ next_rx_no_prod_no_len:
|
||||
*raw_cons = tmp_raw_cons;
|
||||
|
||||
return rc;
|
||||
|
||||
oom_next_rx:
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
|
||||
/* In netpoll mode, if we are using a combined completion ring, we need to
|
||||
@ -2280,7 +2272,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
|
||||
}
|
||||
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
|
||||
if (rc && rc != -EBUSY)
|
||||
cpr->sw_stats.rx.rx_netpoll_discards += 1;
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -9089,7 +9081,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
|
||||
BNXT_FW_HEALTH_WIN_BASE +
|
||||
BNXT_GRC_REG_CHIP_NUM);
|
||||
}
|
||||
if (!BNXT_CHIP_P5(bp))
|
||||
if (!BNXT_CHIP_P5_PLUS(bp))
|
||||
return;
|
||||
|
||||
status_loc = BNXT_GRC_REG_STATUS_P5 |
|
||||
@ -13037,6 +13029,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
|
||||
bnxt_rtnl_unlock_sp(bp);
|
||||
}
|
||||
|
||||
static void bnxt_fw_fatal_close(struct bnxt *bp)
|
||||
{
|
||||
bnxt_tx_disable(bp);
|
||||
bnxt_disable_napi(bp);
|
||||
bnxt_disable_int_sync(bp);
|
||||
bnxt_free_irq(bp);
|
||||
bnxt_clear_int_mode(bp);
|
||||
pci_disable_device(bp->pdev);
|
||||
}
|
||||
|
||||
static void bnxt_fw_reset_close(struct bnxt *bp)
|
||||
{
|
||||
bnxt_ulp_stop(bp);
|
||||
@ -13050,12 +13052,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
|
||||
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
|
||||
if (val == 0xffff)
|
||||
bp->fw_reset_min_dsecs = 0;
|
||||
bnxt_tx_disable(bp);
|
||||
bnxt_disable_napi(bp);
|
||||
bnxt_disable_int_sync(bp);
|
||||
bnxt_free_irq(bp);
|
||||
bnxt_clear_int_mode(bp);
|
||||
pci_disable_device(bp->pdev);
|
||||
bnxt_fw_fatal_close(bp);
|
||||
}
|
||||
__bnxt_close_nic(bp, true, false);
|
||||
bnxt_vf_reps_free(bp);
|
||||
@ -15373,6 +15370,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct bnxt *bp = netdev_priv(netdev);
|
||||
bool abort = false;
|
||||
|
||||
netdev_info(netdev, "PCI I/O error detected\n");
|
||||
|
||||
@ -15381,16 +15379,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
|
||||
|
||||
bnxt_ulp_stop(bp);
|
||||
|
||||
if (state == pci_channel_io_perm_failure) {
|
||||
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
|
||||
netdev_err(bp->dev, "Firmware reset already in progress\n");
|
||||
abort = true;
|
||||
}
|
||||
|
||||
if (abort || state == pci_channel_io_perm_failure) {
|
||||
rtnl_unlock();
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||
|
||||
if (state == pci_channel_io_frozen)
|
||||
/* Link is not reliable anymore if state is pci_channel_io_frozen
|
||||
* so we disable bus master to prevent any potential bad DMAs before
|
||||
* freeing kernel memory.
|
||||
*/
|
||||
if (state == pci_channel_io_frozen) {
|
||||
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
|
||||
bnxt_fw_fatal_close(bp);
|
||||
}
|
||||
|
||||
if (netif_running(netdev))
|
||||
bnxt_close(netdev);
|
||||
__bnxt_close_nic(bp, true, true);
|
||||
|
||||
if (pci_is_enabled(pdev))
|
||||
pci_disable_device(pdev);
|
||||
@ -15474,6 +15483,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
|
||||
}
|
||||
|
||||
reset_exit:
|
||||
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
|
||||
bnxt_clear_reservations(bp, true);
|
||||
rtnl_unlock();
|
||||
|
||||
|
@ -16107,8 +16107,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
|
||||
rd32(&pf->hw, I40E_PRTGL_SAH));
|
||||
if (val < MAX_FRAME_SIZE_DEFAULT)
|
||||
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
|
||||
pf->hw.port, val);
|
||||
dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
|
||||
pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
|
||||
|
||||
/* Add a filter to drop all Flow control frames from any VSI from being
|
||||
* transmitted. By doing so we stop a malicious VF from sending out
|
||||
@ -16650,7 +16650,7 @@ static int __init i40e_init_module(void)
|
||||
* since we need to be able to guarantee forward progress even under
|
||||
* memory pressure.
|
||||
*/
|
||||
i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
|
||||
i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
|
||||
if (!i40e_wq) {
|
||||
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
|
||||
return -ENOMEM;
|
||||
|
@ -3502,6 +3502,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
|
||||
spin_unlock_bh(&adapter->cloud_filter_list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_is_tc_config_same - Compare the mqprio TC config with the
|
||||
* TC config already configured on this adapter.
|
||||
* @adapter: board private structure
|
||||
* @mqprio_qopt: TC config received from kernel.
|
||||
*
|
||||
* This function compares the TC config received from the kernel
|
||||
* with the config already configured on the adapter.
|
||||
*
|
||||
* Return: True if configuration is same, false otherwise.
|
||||
**/
|
||||
static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
|
||||
struct tc_mqprio_qopt *mqprio_qopt)
|
||||
{
|
||||
struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
|
||||
int i;
|
||||
|
||||
if (adapter->num_tc != mqprio_qopt->num_tc)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < adapter->num_tc; i++) {
|
||||
if (ch[i].count != mqprio_qopt->count[i] ||
|
||||
ch[i].offset != mqprio_qopt->offset[i])
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* __iavf_setup_tc - configure multiple traffic classes
|
||||
* @netdev: network interface device structure
|
||||
@ -3559,7 +3587,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
|
||||
if (ret)
|
||||
return ret;
|
||||
/* Return if same TC config is requested */
|
||||
if (adapter->num_tc == num_tc)
|
||||
if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
|
||||
return 0;
|
||||
adapter->num_tc = num_tc;
|
||||
|
||||
|
@ -856,6 +856,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (flags & ICE_VF_RESET_LOCK)
|
||||
mutex_lock(&vf->cfg_lock);
|
||||
else
|
||||
lockdep_assert_held(&vf->cfg_lock);
|
||||
|
||||
lag = pf->lag;
|
||||
mutex_lock(&pf->lag_mutex);
|
||||
if (lag && lag->bonded && lag->primary) {
|
||||
@ -867,11 +872,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
|
||||
act_prt = ICE_LAG_INVALID_PORT;
|
||||
}
|
||||
|
||||
if (flags & ICE_VF_RESET_LOCK)
|
||||
mutex_lock(&vf->cfg_lock);
|
||||
else
|
||||
lockdep_assert_held(&vf->cfg_lock);
|
||||
|
||||
if (ice_is_vf_disabled(vf)) {
|
||||
vsi = ice_get_vf_vsi(vf);
|
||||
if (!vsi) {
|
||||
@ -956,14 +956,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
|
||||
ice_mbx_clear_malvf(&vf->mbx_info);
|
||||
|
||||
out_unlock:
|
||||
if (flags & ICE_VF_RESET_LOCK)
|
||||
mutex_unlock(&vf->cfg_lock);
|
||||
|
||||
if (lag && lag->bonded && lag->primary &&
|
||||
act_prt != ICE_LAG_INVALID_PORT)
|
||||
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
|
||||
mutex_unlock(&pf->lag_mutex);
|
||||
|
||||
if (flags & ICE_VF_RESET_LOCK)
|
||||
mutex_unlock(&vf->cfg_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -298,6 +298,7 @@ struct igc_adapter {
|
||||
|
||||
/* LEDs */
|
||||
struct mutex led_mutex;
|
||||
struct igc_led_classdev *leds;
|
||||
};
|
||||
|
||||
void igc_up(struct igc_adapter *adapter);
|
||||
@ -723,6 +724,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
|
||||
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
|
||||
|
||||
int igc_led_setup(struct igc_adapter *adapter);
|
||||
void igc_led_free(struct igc_adapter *adapter);
|
||||
|
||||
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
|
||||
|
||||
|
@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
|
||||
pci_dev_id(adapter->pdev), index);
|
||||
}
|
||||
|
||||
static void igc_setup_ldev(struct igc_led_classdev *ldev,
|
||||
struct net_device *netdev, int index)
|
||||
static int igc_setup_ldev(struct igc_led_classdev *ldev,
|
||||
struct net_device *netdev, int index)
|
||||
{
|
||||
struct igc_adapter *adapter = netdev_priv(netdev);
|
||||
struct led_classdev *led_cdev = &ldev->led;
|
||||
@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
|
||||
led_cdev->hw_control_get = igc_led_hw_control_get;
|
||||
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
|
||||
|
||||
devm_led_classdev_register(&netdev->dev, led_cdev);
|
||||
return led_classdev_register(&netdev->dev, led_cdev);
|
||||
}
|
||||
|
||||
int igc_led_setup(struct igc_adapter *adapter)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
struct device *dev = &netdev->dev;
|
||||
struct igc_led_classdev *leds;
|
||||
int i;
|
||||
int i, err;
|
||||
|
||||
mutex_init(&adapter->led_mutex);
|
||||
|
||||
leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
|
||||
leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
|
||||
if (!leds)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < IGC_NUM_LEDS; i++)
|
||||
igc_setup_ldev(leds + i, netdev, i);
|
||||
for (i = 0; i < IGC_NUM_LEDS; i++) {
|
||||
err = igc_setup_ldev(leds + i, netdev, i);
|
||||
if (err)
|
||||
goto err;
|
||||
}
|
||||
|
||||
adapter->leds = leds;
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
for (i--; i >= 0; i--)
|
||||
led_classdev_unregister(&((leds + i)->led));
|
||||
|
||||
kfree(leds);
|
||||
return err;
|
||||
}
|
||||
|
||||
void igc_led_free(struct igc_adapter *adapter)
|
||||
{
|
||||
struct igc_led_classdev *leds = adapter->leds;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IGC_NUM_LEDS; i++)
|
||||
led_classdev_unregister(&((leds + i)->led));
|
||||
|
||||
kfree(leds);
|
||||
}
|
||||
|
@ -7021,6 +7021,9 @@ static void igc_remove(struct pci_dev *pdev)
|
||||
cancel_work_sync(&adapter->watchdog_task);
|
||||
hrtimer_cancel(&adapter->hrtimer);
|
||||
|
||||
if (IS_ENABLED(CONFIG_IGC_LEDS))
|
||||
igc_led_free(adapter);
|
||||
|
||||
/* Release control of h/w to f/w. If f/w is AMT enabled, this
|
||||
* would have already happened in close and is redundant.
|
||||
*/
|
||||
|
@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
|
||||
|
||||
kfree(pkind->rsrc.bmap);
|
||||
npc_mcam_rsrcs_deinit(rvu);
|
||||
kfree(mcam->counters.bmap);
|
||||
if (rvu->kpu_prfl_addr)
|
||||
iounmap(rvu->kpu_prfl_addr);
|
||||
else
|
||||
|
@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
|
||||
.mdo_add_secy = mlx5e_macsec_add_secy,
|
||||
.mdo_upd_secy = mlx5e_macsec_upd_secy,
|
||||
.mdo_del_secy = mlx5e_macsec_del_secy,
|
||||
.rx_uses_md_dst = true,
|
||||
};
|
||||
|
||||
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
|
||||
|
@ -849,7 +849,7 @@ free_skb:
|
||||
|
||||
static const struct mlxsw_listener mlxsw_emad_rx_listener =
|
||||
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
|
||||
EMAD, DISCARD);
|
||||
EMAD, FORWARD);
|
||||
|
||||
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
|
||||
{
|
||||
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
	.got_inactive = mlxsw_env_got_inactive,
 };
 
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
 {
	char mcam_pl[MLXSW_REG_MCAM_LEN];
-	bool mcia_128b_supported;
+	bool mcia_128b_supported = false;
	int err;
 
	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
-	if (err)
-		return err;
 
-	mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
-			      &mcia_128b_supported);
+	if (!err)
+		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+				      &mcia_128b_supported);
 
	mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
-	return 0;
 }
 
 int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
	if (err)
		goto err_type_set;
 
-	err = mlxsw_env_max_module_eeprom_len_query(env);
-	if (err)
-		goto err_eeprom_len_query;
-
+	mlxsw_env_max_module_eeprom_len_query(env);
	env->line_cards[0]->active = true;
 
	return 0;
 
-err_eeprom_len_query:
 err_type_set:
	mlxsw_env_module_event_disable(env, 0);
 err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1530,7 +1530,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
 {
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mcam_pl[MLXSW_REG_MCAM_LEN];
-	bool pci_reset_supported;
+	bool pci_reset_supported = false;
	u32 sys_status;
	int err;
 
@@ -1548,11 +1548,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
-	if (err)
-		return err;
-
-	mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
-			      &pci_reset_supported);
+	if (!err)
+		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+				      &pci_reset_supported);
 
	if (pci_reset_supported) {
		pci_dbg(pdev, "Starting PCI reset flow\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/idr.h>
 #include <net/devlink.h>
 #include <trace/events/mlxsw.h>
 
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
 {
-	u16 id;
+	int id;
 
-	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
-	if (id < tcam->max_regions) {
-		__set_bit(id, tcam->used_regions);
-		*p_id = id;
-		return 0;
-	}
-	return -ENOBUFS;
+	id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+			   GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	*p_id = id;
+
+	return 0;
 }
 
 static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
 {
-	__clear_bit(id, tcam->used_regions);
+	ida_free(&tcam->used_regions, id);
 }
 
 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
 {
-	u16 id;
+	int id;
 
-	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
-	if (id < tcam->max_groups) {
-		__set_bit(id, tcam->used_groups);
-		*p_id = id;
-		return 0;
-	}
-	return -ENOBUFS;
+	id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+			   GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	*p_id = id;
+
+	return 0;
 }
 
 static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
 {
-	__clear_bit(id, tcam->used_groups);
+	ida_free(&tcam->used_groups, id);
 }
 
 struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
 
+	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+	mutex_unlock(&vregion->lock);
	if (credits < 0)
		/* Rehash gone out of credits so it was interrupted.
		 * Schedule the work as soon as possible to continue.
@@ -725,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	/* The entry markers are relative to the current chunk and therefore
+	 * needs to be reset together with the chunk marker.
+	 */
+	ctx->current_vchunk = NULL;
+	ctx->start_ventry = NULL;
+	ctx->stop_ventry = NULL;
+}
+
 static void
 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
 {
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
-	vregion->rehash.ctx.current_vchunk = NULL;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
 
	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+		struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
-		cancel_delayed_work_sync(&vregion->rehash.dw);
+		if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+		    ctx->hints_priv)
+			ops->region_rehash_hints_put(ctx->hints_priv);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
 {
-	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
-						    ventry->entry, activity);
+	struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+	int err;
+
+	mutex_lock(&vregion->lock);
+	err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+						   activity);
+	mutex_unlock(&vregion->lock);
+	return err;
 }
 
 static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
 {
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;
 
+	WARN_ON(vchunk->chunk2);
+
	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
 {
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
-	ctx->current_vchunk = NULL;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 }
 
 static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
		return 0;
	}
 
+	if (list_empty(&vchunk->ventry_list))
+		goto out;
+
	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);
 
+	WARN_ON(ventry->vchunk != vchunk);
+
	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
		}
	}
 
+out:
	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
 }
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;
 
+	if (list_empty(&vregion->vchunk_list))
+		return 0;
+
	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
	int err, err2;
 
	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
-	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
+		if (ctx->this_is_rollback)
+			return err;
		/* In case migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
-		ctx->current_vchunk = NULL;
+		mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
			/* Let the rollback to be continued later on. */
		}
	}
-	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
 }
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
 
	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 
	return 0;
 
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+		return;
	}
 
	if (*credits >= 0)
@@ -1549,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;
 
-	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
-	if (!tcam->used_regions) {
-		err = -ENOMEM;
-		goto err_alloc_used_regions;
-	}
+	ida_init(&tcam->used_regions);
	tcam->max_regions = max_regions;
 
	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
-	tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
-	if (!tcam->used_groups) {
-		err = -ENOMEM;
-		goto err_alloc_used_groups;
-	}
+	ida_init(&tcam->used_groups);
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
	return 0;
 
 err_tcam_init:
-	bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
-	bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+	ida_destroy(&tcam->used_groups);
+	ida_destroy(&tcam->used_regions);
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
 err_rehash_params_register:
	mutex_destroy(&tcam->lock);
@@ -1591,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
	ops->fini(mlxsw_sp, tcam->priv);
-	bitmap_free(tcam->used_groups);
-	bitmap_free(tcam->used_regions);
+	ida_destroy(&tcam->used_groups);
+	ida_destroy(&tcam->used_regions);
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
	mutex_destroy(&tcam->lock);
 }
@ -6,15 +6,16 @@
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/parman.h>
|
||||
#include <linux/idr.h>
|
||||
|
||||
#include "reg.h"
|
||||
#include "spectrum.h"
|
||||
#include "core_acl_flex_keys.h"
|
||||
|
||||
struct mlxsw_sp_acl_tcam {
|
||||
unsigned long *used_regions; /* bit array */
|
||||
struct ida used_regions;
|
||||
unsigned int max_regions;
|
||||
unsigned long *used_groups; /* bit array */
|
||||
struct ida used_groups;
|
||||
unsigned int max_groups;
|
||||
unsigned int max_group_size;
|
||||
struct mutex lock; /* guards vregion list */
|
||||
|
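
The hunks above replace mlxsw's hand-rolled ID bitmaps with IDAs, which grow on demand and so remove the bitmap_zalloc() sizing step and its error unwinding. A sketch only — the allocation helpers are not part of the quoted hunks, so these function names are illustrative:

/* Illustrative allocation side after the IDA conversion; names are
 * placeholders, not code from this series. ida_alloc_max() hands out
 * the lowest free ID up to the cap, ida_free() returns it. */
static int example_region_id_get(struct mlxsw_sp_acl_tcam *tcam, u16 *p_id)
{
	int id;

	id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
			   GFP_KERNEL);
	if (id < 0)
		return id;
	*p_id = id;
	return 0;
}

static void example_region_id_put(struct mlxsw_sp_acl_tcam *tcam, u16 id)
{
	ida_free(&tcam->used_regions, id);
}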
@@ -2722,19 +2722,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
const char *dev_name;
const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;

if (irq_name) {
dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
if (!dev_name)
devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
if (!devname)
return -ENOMEM;

irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2744,9 +2743,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;

error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
netdev_err(ndev, "cannot request IRQ %s\n", devname);

return error;
}

@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;

if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
(skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
mtype_seqid = skb_cb->skb_mtype_seqid;

if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;

@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
if (rx_chn->irq[i] <= 0) {
ret = rx_chn->irq[i];
ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
if (ret <= 0) {
if (!ret)
ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
rx_chn->irq[i] = ret;
}

return 0;

@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
struct irq_affinity affd = {0, };
struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;

/* We start by asking for one vector per queue pair */

@@ -20,8 +20,6 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"

#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"

static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -573,8 +571,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;

snprintf(clk_name, sizeof(clk_name), "%s.%d",
TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
pci_dev_id(pdev));

clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -636,7 +634,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)

info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
info.name = TXGBE_I2C_CLK_DEV_NAME;
info.name = "i2c_designware";
info.id = pci_dev_id(pdev);

info.res = &DEFINE_RES_IRQ(pdev->irq);

@@ -1098,11 +1098,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct hlist_node *next;
struct pdp_ctx *pctx;
int i;

for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);

list_del_rcu(&gtp->list);
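
The gtp change swaps the RCU iterator for the _safe variant because the loop body frees the entry it stands on; the iterator would then read the next pointer out of freed memory. The hazard, reduced to a plain singly linked list as a sketch (not kernel code):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Freeing while walking: read n->next *before* free(n), which is
 * exactly what the _safe iterator's extra cursor does for an hlist. */
static void drain(struct node *head)
{
	struct node *n = head;

	while (n) {
		struct node *next = n->next;	/* cache successor first */

		free(n);			/* now n may go away */
		n = next;
	}
}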
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
bool is_macsec_md_dst;

rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;

list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
struct macsec_rx_sc *rx_sc = NULL;
const struct macsec_ops *ops;

if (md_dst && md_dst->type == METADATA_MACSEC)
rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
ops = macsec_get_ops(macsec, NULL);

if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;

if (is_macsec_md_dst) {
struct macsec_rx_sc *rx_sc;

/* All drivers that implement MACsec offload
* support using skb metadata destinations must
* indicate that they do so.
*/
DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
rx_sc = find_rx_sc(&macsec->secy,
md_dst->u.macsec_info.sci);
if (!rx_sc)
continue;
/* device indicated macsec offload occurred */
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
eth_skb_pkt_type(skb, ndev);
ret = RX_HANDLER_ANOTHER;
goto out;
}

/* This datapath is insecure because it is unable to
* enforce isolation of broadcast/multicast traffic and
* unicast traffic with promiscuous mode on the macsec
* netdev. Since the core stack has no mechanism to
* check that the hardware did indeed receive MACsec
* traffic, it is possible that the response handling
* done by the MACsec port was to a plaintext packet.
* This violates the MACsec protocol standard.
*/
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;

nskb->dev = ndev;
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->broadcast))
nskb->pkt_type = PACKET_BROADCAST;
else
nskb->pkt_type = PACKET_MULTICAST;
eth_skb_pkt_type(nskb, ndev);

__netif_rx(nskb);
} else if (rx_sc || ndev->flags & IFF_PROMISC) {
} else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
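
The new ops->rx_uses_md_dst check above only works if offloading drivers declare the capability. A hypothetical driver-side counterpart — the callback bodies are placeholders, not code from this series:

/* Hypothetical example: a driver that attaches a MACsec metadata
 * destination to received skbs must now advertise it, otherwise
 * handle_not_macsec() will skip its SecYs for md_dst-tagged traffic. */
static const struct macsec_ops example_macsec_ops = {
	.mdo_add_secy	= example_add_secy,	/* placeholder callbacks */
	.mdo_del_secy	= example_del_secy,
	.rx_uses_md_dst	= true,
};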
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
dp83869->mode == DP83869_RGMII_100_BASE) {
dp83869->mode == DP83869_RGMII_100_BASE ||
dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");

@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
MTK_PHY_LED_ON_LINK100 |\
MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
MTK_PHY_LED_BLINK_100RX |\
MTK_PHY_LED_BLINK_1000RX)
#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
MTK_PHY_LED_BLINK_100TX |\
MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;

if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
MTK_PHY_LED_ON_LINK10)) ||
(blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
MTK_PHY_LED_ON_LINKDOWN)) ||
(blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;

if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);

if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);

if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);

if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);

return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;

if (rules & BIT(TRIGGER_NETDEV_RX)) {
blink |= MTK_PHY_LED_BLINK_10RX |
MTK_PHY_LED_BLINK_100RX |
MTK_PHY_LED_BLINK_1000RX;
blink |= (on & MTK_PHY_LED_ON_LINK) ?
(((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
MTK_PHY_LED_BLINK_RX;
}

if (rules & BIT(TRIGGER_NETDEV_TX)) {
blink |= MTK_PHY_LED_BLINK_10TX |
MTK_PHY_LED_BLINK_100TX |
MTK_PHY_LED_BLINK_1000TX;
blink |= (on & MTK_PHY_LED_ON_LINK) ?
(((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
MTK_PHY_LED_BLINK_TX;
}

if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
MTK_PHY_LED_ON_LINK10 |
MTK_PHY_LED_ON_LINK100 |
MTK_PHY_LED_ON_LINK1000,
MTK_PHY_LED_ON_LINK,
on);

if (ret)

@@ -1456,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);

skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}

ax_skb = skb_clone(skb, GFP_ATOMIC);
ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
skb_trim(ax_skb, pkt_len);
skb_put(ax_skb, pkt_len);
memcpy(ax_skb->data, skb->data + 2, pkt_len);

/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);

skb->truesize = pkt_len_plus_padd +
SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);

@@ -1368,6 +1368,9 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */

@@ -1615,6 +1615,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;

/* Ignore packets from invalid src-address */
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
return false;

/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;

@@ -9020,6 +9020,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;

/* Note: read_lock_bh() calls rcu_read_lock() */
read_lock_bh(&idev->lock);

memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9050,7 +9051,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
}

/* get anycast address */
for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
ifaca6 = rcu_dereference(ifaca6->aca_next)) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;

@@ -10,7 +10,7 @@
#include "fw/api/txq.h"

/* Highest firmware API version supported */
#define IWL_BZ_UCODE_API_MAX 90
#define IWL_BZ_UCODE_API_MAX 89

/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80

@@ -10,7 +10,7 @@
#include "fw/api/txq.h"

/* Highest firmware API version supported */
#define IWL_SC_UCODE_API_MAX 90
#define IWL_SC_UCODE_API_MAX 89

/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82

@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;

iwl_mvm_ftm_remove_pasn_sta(mvm, addr);

pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

switch (pasn->cipher) {

@@ -279,6 +279,7 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,

RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
NULL);
iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
return 0;
}

@@ -296,7 +297,6 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;

cmd.link_id = cpu_to_le32(link_info->fw_link_id);
iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);

@@ -2813,7 +2813,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;

return ver_handler->handler(mvm, vif, params, type, uid);
err = ver_handler->handler(mvm, vif, params, type, uid);
return err ? : uid;
}

err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);

@@ -3899,7 +3899,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}

nla_for_each_nested(peer, peers, rem) {
struct cfg80211_pmsr_result result;
struct cfg80211_pmsr_result result = {};

err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)

@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
struct regulator *regulator;
struct regulator *vin_regulator;
struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;

ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);

ret = regulator_disable(trf->regulator);
ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);

trf->regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->vin_regulator)) {
ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}

ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}

uvolts = regulator_get_voltage(trf->regulator);
uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;

trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->vddio_regulator)) {
ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}

ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}

if (regulator_get_voltage(trf->regulator) == 1800000) {
if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
goto err_disable_regulator;
goto err_disable_vddio_regulator;
}

nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
err_disable_regulator:
regulator_disable(trf->regulator);
err_disable_vddio_regulator:
regulator_disable(trf->vddio_regulator);
err_disable_vin_regulator:
regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);

regulator_disable(trf->regulator);
regulator_disable(trf->vddio_regulator);
regulator_disable(trf->vin_regulator);

mutex_destroy(&trf->lock);
}

@@ -607,6 +607,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
eth_hw_addr_set(dev, addr);
}

/**
* eth_skb_pkt_type - Assign packet type if destination address does not match
* @skb: Assigned a packet type if address does not match @dev address
* @dev: Network device used to compare packet address against
*
* If the destination MAC address of the packet does not match the network
* device address, assign an appropriate packet type.
*/
static inline void eth_skb_pkt_type(struct sk_buff *skb,
const struct net_device *dev)
{
const struct ethhdr *eth = eth_hdr(skb);

if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
skb->pkt_type = PACKET_OTHERHOST;
}
}
}

/**
* eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
* @skb: Buffer to pad
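
For reference, this is how the new helper is consumed by the macsec and eth_type_trans hunks elsewhere in this series: divert the skb to the port, default to PACKET_HOST, and let the helper downgrade the type when the destination address does not match.

/* Usage pattern taken from the callers in this series: */
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
eth_skb_pkt_type(skb, ndev);	/* may rewrite to BROADCAST/MULTICAST/OTHERHOST */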
@@ -85,6 +85,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
* candidates to close a small race window.
*/
};

static inline void unix_state_lock_nested(struct sock *sk,

@@ -738,6 +738,8 @@ struct hci_conn {
__u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
__u16 le_per_adv_data_len;
__u16 le_per_adv_data_offset;
__u8 le_adv_phy;
__u8 le_adv_sec_phy;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1512,7 +1514,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role);
u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
@@ -1905,6 +1907,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
(hdev->commands[39] & 0x04))

#define read_key_size_capable(dev) \
((dev)->commands[20] & 0x10 && \
!test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))

/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/

@@ -953,6 +953,8 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
* @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
* due to scanning, not in normal operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -973,6 +975,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};

@@ -321,6 +321,7 @@ struct macsec_context {
* for the TX tag
* @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
* TX tag
* @rx_uses_md_dst: whether MACsec device offload supports sk_buff md_dst
*/
struct macsec_ops {
/* Device wide */
@@ -352,6 +353,7 @@ struct macsec_ops {
struct sk_buff *skb);
unsigned int needed_headroom;
unsigned int needed_tailroom;
bool rx_uses_md_dst;
};

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);

@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
extern int sysctl_mem_pcpu_rsv;

static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
static inline void proto_memory_pcpu_drain(struct proto *proto)
{
int local_reserve;
int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);

preempt_disable();
local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
preempt_enable();
if (val)
atomic_long_add(val, proto->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
sk_memory_allocated_add(const struct sock *sk, int val)
{
int local_reserve;
struct proto *proto = sk->sk_prot;

preempt_disable();
local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
preempt_enable();
val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);

if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
proto_memory_pcpu_drain(proto);
}

static inline void
sk_memory_allocated_sub(const struct sock *sk, int val)
{
struct proto *proto = sk->sk_prot;

val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);

if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
proto_memory_pcpu_drain(proto);
}

#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
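
The sock.h rework above keeps a per-CPU reserve and only folds it into the shared atomic once it crosses the sysctl threshold in either direction, now using IRQ-safe this_cpu ops plus a common drain helper. A toy user-space model of the same batching idea, with thread-local state standing in for per-CPU data and an invented threshold:

#include <stdatomic.h>
#include <threads.h>

static atomic_long memory_allocated;		/* the shared counter */
static thread_local long local_reserve;		/* per-thread batch */
#define MEM_PCPU_RSV (1L << 10)			/* invented threshold */

static void toy_memory_allocated_add(long val)
{
	local_reserve += val;
	/* Drain in either direction once the batch is big enough,
	 * like proto_memory_pcpu_drain() after this_cpu_xchg(.., 0). */
	if (local_reserve >= MEM_PCPU_RSV || local_reserve <= -MEM_PCPU_RSV) {
		atomic_fetch_add(&memory_allocated, local_reserve);
		local_reserve = 0;
	}
}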
@@ -111,7 +111,8 @@ struct tls_strparser {
u32 stopped : 1;
u32 copy_mode : 1;
u32 mixed_decrypted : 1;
u32 msg_ready : 1;

bool msg_ready;

struct strp_msg stm;
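
The tls change moves msg_ready out of the bitfield because ->poll reads it without the strparser lock; neighbouring bitfield bits share one storage word, so an unlocked read races with read-modify-write updates of the other flags. Reduced to a sketch:

/* Sketch: in the packed form, updating ->stopped rewrites the whole
 * word that also carries msg_ready, so a lockless reader of msg_ready
 * races with it. As a standalone bool it has its own storage unit and
 * can be read with a READ_ONCE()/WRITE_ONCE() pairing. */
struct packed_flags {
	unsigned int stopped : 1;
	unsigned int msg_ready : 1;	/* shares storage with stopped */
};

struct split_flags {
	unsigned int stopped : 1;
	bool msg_ready;			/* independent storage unit */
};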
@@ -103,7 +103,7 @@ again:
s->ax25_dev = NULL;
if (sk->sk_socket) {
netdev_put(ax25_dev->dev,
&ax25_dev->dev_tracker);
&s->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);

@@ -1263,7 +1263,7 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)

struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role)
u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct smp_irk *irk;
@@ -1326,6 +1326,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
conn->le_adv_phy = phy;
conn->le_adv_sec_phy = sec_phy;

err = hci_connect_le_sync(hdev, conn);
if (err) {
@@ -2273,7 +2275,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
le = hci_connect_le(hdev, dst, dst_type, false,
BT_SECURITY_LOW,
HCI_LE_CONN_TIMEOUT,
HCI_ROLE_SLAVE);
HCI_ROLE_SLAVE, 0, 0);
else
le = hci_connect_le_scan(hdev, dst, dst_type,
BT_SECURITY_LOW,

@@ -3218,7 +3218,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
if (key) {
set_bit(HCI_CONN_ENCRYPT, &conn->flags);

if (!(hdev->commands[20] & 0x10)) {
if (!read_key_size_capable(hdev)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
} else {
cp.handle = cpu_to_le16(conn->handle);
@@ -3666,8 +3666,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
* controller really supports it. If it doesn't, assume
* the default size (16).
*/
if (!(hdev->commands[20] & 0x10) ||
test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
if (!read_key_size_capable(hdev)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
}
@@ -6038,7 +6037,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type, bool addr_resolved,
u8 adv_type)
u8 adv_type, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -6093,7 +6092,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,

conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
HCI_ROLE_MASTER);
HCI_ROLE_MASTER, phy, sec_phy);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -6128,8 +6127,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,

static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *direct_addr,
u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
bool ext_adv, bool ctl_time, u64 instant)
u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
u8 *data, u8 len, bool ext_adv, bool ctl_time,
u64 instant)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
@@ -6217,7 +6217,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* for advertising reports) and is already verified to be RPA above.
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
type);
type, phy, sec_phy);
if (!ext_adv && conn && type == LE_ADV_IND &&
len <= max_adv_len(hdev)) {
/* Store report for later inclusion by
@@ -6363,7 +6363,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
if (info->length <= max_adv_len(hdev)) {
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, NULL, 0, rssi,
info->bdaddr_type, NULL, 0,
HCI_ADV_PHY_1M, 0, rssi,
info->data, info->length, false,
false, instant);
} else {
@@ -6448,6 +6449,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
info->primary_phy,
info->secondary_phy,
info->rssi, info->data, info->length,
!(evt_type & LE_EXT_ADV_LEGACY_PDU),
false, instant);
@@ -6730,8 +6733,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,

process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, &info->direct_addr,
info->direct_addr_type, info->rssi, NULL, 0,
false, false, instant);
info->direct_addr_type, HCI_ADV_PHY_1M, 0,
info->rssi, NULL, 0, false, false, instant);
}

hci_dev_unlock(hdev);

@@ -6346,7 +6346,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,

plen = sizeof(*cp);

if (scan_1m(hdev)) {
if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);

@@ -6354,7 +6355,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}

if (scan_2m(hdev)) {
if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);

@@ -6362,7 +6364,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
plen += sizeof(*p);
}

if (scan_coded(hdev)) {
if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);

@@ -7018,7 +7018,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
hcon = hci_connect_le(hdev, dst, dst_type, false,
chan->sec_level, timeout,
HCI_ROLE_SLAVE);
HCI_ROLE_SLAVE, 0, 0);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level, timeout,

@@ -439,7 +439,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
int err = 0;
size_t len;
u32 opt;

BT_DBG("sk %p", sk);
@@ -486,7 +487,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,

BT_DBG("mode 0x%2.2x", chan->mode);

len = min_t(unsigned int, len, sizeof(opts));
len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;

@@ -536,7 +537,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);

len = min_t(unsigned int, len, sizeof(cinfo));
len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
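
Both Bluetooth getsockopt fixes (here and in sco_sock_getsockopt_old below) follow the same recipe: make len a size_t and drop min_t() for min(). In isolation, and as a sketch of the reasoning rather than the full change:

/* min_t(unsigned int, len, sizeof(opts)) force-casts both operands,
 * silently converting a signed len through unsigned before comparing;
 * min() is type-checked, so once len is a size_t the compiler verifies
 * the comparison and no implicit signed/unsigned conversion remains. */
size_t len;			/* was: int len */
len = min(len, sizeof(opts));	/* type-checked */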
@@ -2623,7 +2623,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}

err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
* hci_cmd_sync_submit instead of hci_cmd_sync_queue.
*/
err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
mgmt_class_complete);
if (err < 0) {
mgmt_pending_free(cmd);
goto failed;
@@ -2717,8 +2721,11 @@ update_class:
goto unlock;
}

err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
mgmt_class_complete);
/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
* hci_cmd_sync_submit instead of hci_cmd_sync_queue.
*/
err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);

@@ -2784,8 +2791,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
mgmt_class_complete);
/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
* hci_cmd_sync_submit instead of hci_cmd_sync_queue.
*/
err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);

@@ -5475,8 +5485,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}

err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
mgmt_remove_adv_monitor_complete);
err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
mgmt_remove_adv_monitor_complete);

if (err) {
mgmt_pending_remove(cmd);

@@ -964,7 +964,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
int err = 0;
size_t len;

BT_DBG("sk %p", sk);

@@ -986,7 +987,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,

BT_DBG("mtu %u", opts.mtu);

len = min_t(unsigned int, len, sizeof(opts));
len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;

@@ -1004,7 +1005,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);

len = min_t(unsigned int, len, sizeof(cinfo));
len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;

@@ -667,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
{
u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

return br_info_notify(event, br, port, filter);
br_info_notify(event, br, port, filter);
}

/*

@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);

if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
dev->dev_addr))) {
if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
skb->pkt_type = PACKET_OTHERHOST;
}
}
eth_skb_pkt_type(skb, dev);

/*
* Some variants of DSA tagging don't have an ethertype field

@@ -92,6 +92,7 @@
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>

/*
* Build xmit assembly blocks
@@ -1032,6 +1033,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
struct icmp_ext_echo_iio *iio, _iio;
struct net *net = dev_net(skb->dev);
struct inet6_dev *in6_dev;
struct in_device *in_dev;
struct net_device *dev;
char buff[IFNAMSIZ];
u16 ident_len;
@@ -1115,10 +1118,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
status |= ICMP_EXT_ECHOREPLY_ACTIVE;
if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)

in_dev = __in_dev_get_rcu(dev);
if (in_dev && rcu_access_pointer(in_dev->ifa_list))
status |= ICMP_EXT_ECHOREPLY_IPV4;
if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))

in6_dev = __in6_dev_get(dev);
if (in6_dev && !list_empty(&in6_dev->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;

dev_put(dev);
icmphdr->un.echo.sequence |= htons(status);
return true;

@@ -2166,6 +2166,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int err = -EINVAL;
u32 tag = 0;

if (!in_dev)
return -EINVAL;

if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;

@@ -1068,6 +1068,7 @@ void tcp_ao_connect_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_ao_info *ao_info;
struct hlist_node *next;
union tcp_ao_addr *addr;
struct tcp_ao_key *key;
int family, l3index;
@@ -1090,7 +1091,7 @@ void tcp_ao_connect_init(struct sock *sk)
l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
sk->sk_bound_dev_if);

hlist_for_each_entry_rcu(key, &ao_info->head, node) {
hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
continue;

@@ -1123,16 +1123,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
if (err > 0)
if (err > 0) {
err = ip_cmsg_send(sk, msg, &ipc,
sk->sk_family == AF_INET6);
connected = 0;
}
if (unlikely(err < 0)) {
kfree(ipc.opt);
return err;
}
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;

@@ -1474,9 +1474,11 @@ do_udp_sendmsg:
ipc6.opt = opt;

err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
if (err > 0)
if (err > 0) {
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
&ipc6);
connected = false;
}
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1488,7 +1490,6 @@ do_udp_sendmsg:
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = false;
}
if (!opt) {
opt = txopt_get(np);

@@ -797,6 +797,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *curr_ctx = NULL;
bool new_idle;
int ret = 0;

if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
@@ -829,8 +830,6 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
out:
rcu_assign_pointer(link->conf->chanctx_conf, conf);

sdata->vif.cfg.idle = !conf;

if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
ieee80211_recalc_chanctx_chantype(local, curr_ctx);
ieee80211_recalc_smps_chanctx(local, curr_ctx);
@@ -843,9 +842,27 @@ out:
ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
}

if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_MONITOR)
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
if (conf) {
new_idle = false;
} else {
struct ieee80211_link_data *tmp;

new_idle = true;
for_each_sdata_link(local, tmp) {
if (rcu_access_pointer(tmp->conf->chanctx_conf)) {
new_idle = false;
break;
}
}
}

if (new_idle != sdata->vif.cfg.idle) {
sdata->vif.cfg.idle = new_idle;

if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_MONITOR)
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
}

ieee80211_check_fast_xmit_iface(sdata);

@@ -747,6 +747,9 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mesh_fast_tx_key key = {
.type = MESH_FAST_TX_TYPE_LOCAL
};
struct ieee80211_mesh_fast_tx *entry;
struct ieee80211s_hdr *meshhdr;
u8 sa[ETH_ALEN] __aligned(2);
@@ -782,7 +785,10 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
return false;
}

entry = mesh_fast_tx_get(sdata, skb->data);
ether_addr_copy(key.addr, skb->data);
if (!ether_addr_equal(skb->data + ETH_ALEN, sdata->vif.addr))
key.type = MESH_FAST_TX_TYPE_PROXIED;
entry = mesh_fast_tx_get(sdata, &key);
if (!entry)
return false;

@@ -134,10 +134,39 @@ struct mesh_path {
#define MESH_FAST_TX_CACHE_THRESHOLD_SIZE 384
#define MESH_FAST_TX_CACHE_TIMEOUT 8000 /* msecs */

/**
* enum ieee80211_mesh_fast_tx_type - cached mesh fast tx entry type
*
* @MESH_FAST_TX_TYPE_LOCAL: tx from the local vif address as SA
* @MESH_FAST_TX_TYPE_PROXIED: local tx with a different SA (e.g. bridged)
* @MESH_FAST_TX_TYPE_FORWARDED: forwarded from a different mesh point
* @NUM_MESH_FAST_TX_TYPE: number of entry types
*/
enum ieee80211_mesh_fast_tx_type {
MESH_FAST_TX_TYPE_LOCAL,
MESH_FAST_TX_TYPE_PROXIED,
MESH_FAST_TX_TYPE_FORWARDED,

/* must be last */
NUM_MESH_FAST_TX_TYPE
};

/**
* struct ieee80211_mesh_fast_tx_key - cached mesh fast tx entry key
*
* @addr: The Ethernet DA for this entry
* @type: cache entry type
*/
struct ieee80211_mesh_fast_tx_key {
u8 addr[ETH_ALEN] __aligned(2);
u16 type;
};

/**
* struct ieee80211_mesh_fast_tx - cached mesh fast tx entry
* @rhash: rhashtable pointer
* @addr_key: The Ethernet DA which is the key for this entry
* @key: the lookup key for this cache entry
* @fast_tx: base fast_tx data
* @hdr: cached mesh and rfc1042 headers
* @hdrlen: length of mesh + rfc1042
@@ -148,7 +177,7 @@ struct mesh_path {
*/
struct ieee80211_mesh_fast_tx {
struct rhash_head rhash;
u8 addr_key[ETH_ALEN] __aligned(2);
struct ieee80211_mesh_fast_tx_key key;

struct ieee80211_fast_tx fast_tx;
u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)];
@@ -334,7 +363,8 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);

bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr);
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx_key *key);
bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 ctrl_flags);
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,

@@ -37,8 +37,8 @@ static const struct rhashtable_params mesh_rht_params = {
static const struct rhashtable_params fast_tx_rht_params = {
.nelem_hint = 10,
.automatic_shrinking = true,
.key_len = ETH_ALEN,
.key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
.key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
.key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
.hashfn = mesh_table_hash,
};
@@ -431,20 +431,21 @@ static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
}

struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx_key *key)
{
struct ieee80211_mesh_fast_tx *entry;
struct mesh_tx_cache *cache;

cache = &sdata->u.mesh.tx_cache;
entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (!entry)
return NULL;

if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
mpath_expired(entry->mpath)) {
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
spin_unlock_bh(&cache->walk_lock);
@@ -489,18 +490,24 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;

build.key.type = MESH_FAST_TX_TYPE_LOCAL;
if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
/* This is required to keep the mppath alive */
mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
if (!mppath)
return;
build.mppath = mppath;
if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
build.key.type = MESH_FAST_TX_TYPE_PROXIED;
} else if (ieee80211_has_a4(hdr->frame_control)) {
mppath = mpath;
} else {
return;
}

if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
build.key.type = MESH_FAST_TX_TYPE_FORWARDED;

/* rate limit, in case fast xmit can't be enabled */
if (mppath->fast_tx_check == jiffies)
return;
@@ -547,7 +554,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
}
}

memcpy(build.addr_key, mppath->dst, ETH_ALEN);
memcpy(build.key.addr, mppath->dst, ETH_ALEN);
build.timestamp = jiffies;
build.fast_tx.band = info->band;
build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
@@ -646,12 +653,18 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx_key key = {};
struct ieee80211_mesh_fast_tx *entry;
int i;

ether_addr_copy(key.addr, addr);
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
key.type = i;
entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
if (entry)
mesh_fast_tx_entry_free(cache, entry);
}
spin_unlock_bh(&cache->walk_lock);
}
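
All of the mesh fast-tx call sites now build the same small {DA, type} key before the rhashtable lookup; the key struct packs a 6-byte address plus a u16 type with no padding holes, which is what lets .key_len use sizeof_field(). The pattern, extracted from the hunks above:

/* Key construction as used by the lookup paths in this series;
 * designated initialization zeroes the struct first, so any padding
 * would also be zeroed before it is hashed. */
struct ieee80211_mesh_fast_tx_key key = {
	.type = MESH_FAST_TX_TYPE_FORWARDED,
};

ether_addr_copy(key.addr, mesh_hdr->eaddr1);
entry = mesh_fast_tx_get(sdata, &key);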
@ -616,7 +616,6 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
|
||||
.from_ap = true,
|
||||
.start = ies->data,
|
||||
.len = ies->len,
|
||||
.mode = conn->mode,
|
||||
};
|
||||
struct ieee802_11_elems *elems;
|
||||
struct ieee80211_supported_band *sband;
|
||||
@ -625,6 +624,7 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
|
||||
int ret;
|
||||
|
||||
again:
|
||||
parse_params.mode = conn->mode;
|
||||
elems = ieee802_11_parse_elems_full(&parse_params);
|
||||
if (!elems)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -632,15 +632,21 @@ again:
|
||||
ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
|
||||
elems, false, conn, &ap_chandef);
|
||||
|
||||
mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
|
||||
cbss->bssid, ieee80211_conn_mode_str(ap_mode));
|
||||
|
||||
/* this should be impossible since parsing depends on our mode */
|
||||
if (WARN_ON(ap_mode > conn->mode)) {
|
||||
ret = -EINVAL;
|
||||
goto free;
|
||||
}
|
||||
|
||||
if (conn->mode != ap_mode) {
|
||||
conn->mode = ap_mode;
|
||||
kfree(elems);
|
||||
goto again;
|
||||
}
|
||||
|
||||
mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
|
||||
cbss->bssid, ieee80211_conn_mode_str(ap_mode));
|
||||
|
||||
sband = sdata->local->hw.wiphy->bands[channel->band];
|
||||
|
||||
switch (channel->band) {
|
||||
@ -691,7 +697,6 @@ again:
|
||||
break;
|
||||
}
|
||||
|
||||
conn->mode = ap_mode;
|
||||
chanreq->oper = ap_chandef;
|
||||
|
||||
/* wider-bandwidth OFDMA is only done in EHT */
|
||||
@ -753,8 +758,10 @@ again:
|
||||
}
|
||||
|
||||
/* the mode can only decrease, so this must terminate */
|
||||
if (ap_mode != conn->mode)
|
||||
if (ap_mode != conn->mode) {
|
||||
kfree(elems);
|
||||
goto again;
|
||||
}
|
||||
|
||||
mlme_link_id_dbg(sdata, link_id,
|
||||
"connecting with %s mode, max bandwidth %d MHz\n",
|
||||
@ -5812,7 +5819,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
|
||||
*/
|
||||
if (control &
|
||||
IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
|
||||
link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
|
||||
link_removal_timeout[link_id] = get_unaligned_le16(pos);
|
||||
}
|
||||
|
||||
removed_links &= sdata->vif.valid_links;
|
||||
@ -5837,8 +5844,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
|
||||
continue;
|
||||
}
|
||||
|
||||
link_delay = link_conf->beacon_int *
|
||||
link_removal_timeout[link_id];
|
||||
if (link_removal_timeout[link_id] < 1)
|
||||
link_delay = 0;
|
||||
else
|
||||
link_delay = link_conf->beacon_int *
|
||||
(link_removal_timeout[link_id] - 1);
|
||||
|
||||
if (!delay)
|
||||
delay = link_delay;
|
||||
@ -6193,7 +6203,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
|
||||
link->u.mgd.dtim_period = elems->dtim_period;
|
||||
link->u.mgd.have_beacon = true;
|
||||
ifmgd->assoc_data->need_beacon = false;
|
||||
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
|
||||
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
|
||||
!ieee80211_is_s1g_beacon(hdr->frame_control)) {
|
||||
link->conf->sync_tsf =
|
||||
le64_to_cpu(mgmt->u.beacon.timestamp);
|
||||
link->conf->sync_device_ts =
|
||||
|
@ -877,6 +877,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
|
||||
struct ieee80211_sub_if_data *sdata;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_supported_band *sband;
|
||||
u32 mask = ~0;
|
||||
|
||||
rate_control_fill_sta_table(sta, info, dest, max_rates);
|
||||
|
||||
@ -889,9 +890,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
|
||||
if (ieee80211_is_tx_data(skb))
|
||||
rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
|
||||
|
||||
if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX))
|
||||
mask = sdata->rc_rateidx_mask[info->band];
|
||||
|
||||
if (dest[0].idx < 0)
|
||||
__rate_control_send_low(&sdata->local->hw, sband, sta, info,
|
||||
sdata->rc_rateidx_mask[info->band]);
|
||||
mask);
|
||||
|
||||
if (sta)
|
||||
rate_fixup_ratelist(vif, sband, info, dest, max_rates);
|
||||
|
@@ -2763,7 +2763,10 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
 			       struct sk_buff *skb, int hdrlen)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_mesh_fast_tx *entry = NULL;
+	struct ieee80211_mesh_fast_tx_key key = {
+		.type = MESH_FAST_TX_TYPE_FORWARDED
+	};
+	struct ieee80211_mesh_fast_tx *entry;
 	struct ieee80211s_hdr *mesh_hdr;
 	struct tid_ampdu_tx *tid_tx;
 	struct sta_info *sta;
@@ -2772,9 +2775,13 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,

 	mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
 	if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
-		entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1);
+		ether_addr_copy(key.addr, mesh_hdr->eaddr1);
 	else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
-		entry = mesh_fast_tx_get(sdata, skb->data);
+		ether_addr_copy(key.addr, skb->data);
 	else
 		return false;

+	entry = mesh_fast_tx_get(sdata, &key);
 	if (!entry)
 		return false;

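
The fast-tx cache is now keyed on (address, entry type) rather than the address alone, so forwarded, proxied and locally originated entries for the same MAC no longer alias each other, and the lookup takes a key struct instead of a bare address. A sketch of the keyed-lookup shape, assuming an rhashtable whose key is the whole struct (the real struct ieee80211_mesh_fast_tx_key lives in mac80211; the fast_tx_ names below are illustrative):

	#include <linux/etherdevice.h>
	#include <linux/rhashtable.h>

	/* No implicit padding: memcmp()-style hashtable keys must not
	 * contain uninitialized bytes.
	 */
	struct fast_tx_key {
		u8 addr[ETH_ALEN];
		u16 type;		/* local / proxied / forwarded */
	};

	struct fast_tx_entry {
		struct fast_tx_key key;
		struct rhash_head rhash;
	};

	static const struct rhashtable_params fast_tx_params = {
		.key_len     = sizeof(struct fast_tx_key),
		.key_offset  = offsetof(struct fast_tx_entry, key),
		.head_offset = offsetof(struct fast_tx_entry, rhash),
	};

	static struct fast_tx_entry *fast_tx_get(struct rhashtable *ht,
						 const u8 *addr, u16 type)
	{
		struct fast_tx_key key = { .type = type };

		ether_addr_copy(key.addr, addr);
		return rhashtable_lookup_fast(ht, &key, fast_tx_params);
	}
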
@@ -3780,6 +3787,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		}
 		break;
 	case WLAN_CATEGORY_PROTECTED_EHT:
+		if (len < offsetofend(typeof(*mgmt),
+				      u.action.u.ttlm_req.action_code))
+			break;
+
 		switch (mgmt->u.action.u.ttlm_req.action_code) {
 		case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
 			if (sdata->vif.type != NL80211_IFTYPE_STATION)
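
The added length check is the standard validation pattern for remote input: before reading a field of a received frame, confirm the reported length covers everything up to and including that field, which offsetofend() expresses directly. Self-contained illustration (struct demo_action is made up):

	#include <linux/stddef.h>
	#include <linux/types.h>

	struct demo_action {
		u8 category;
		u8 action_code;
		u8 payload[];
	};

	/* True only if the frame is long enough to read action_code. */
	static bool demo_len_ok(size_t len)
	{
		return len >= offsetofend(struct demo_action, action_code);
	}
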
@@ -648,6 +648,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
 				cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
 		}
 		IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+		IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX;
 		ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
 	}
 }
@@ -698,11 +698,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 	txrc.bss_conf = &tx->sdata->vif.bss_conf;
 	txrc.skb = tx->skb;
 	txrc.reported_rate.idx = -1;
-	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];

-	if (tx->sdata->rc_has_mcs_mask[info->band])
-		txrc.rate_idx_mcs_mask =
-			tx->sdata->rc_rateidx_mcs_mask[info->band];
+	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) {
+		txrc.rate_idx_mask = ~0;
+	} else {
+		txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+
+		if (tx->sdata->rc_has_mcs_mask[info->band])
+			txrc.rate_idx_mcs_mask =
+				tx->sdata->rc_rateidx_mcs_mask[info->band];
+	}

 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
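
Together with the rate.c and scan.c hunks above this is one fix: probe requests sent during a scan are tagged IEEE80211_TX_CTRL_SCAN_TX and rate control then bypasses the user-configured rate-index mask for them, since a restrictive mask could otherwise leave no usable rate on the band being scanned. The tag-and-bypass pattern in reduced form (the demo_ names are illustrative):

	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEMO_CTRL_SCAN_TX	BIT(0)

	struct demo_cb {
		u32 control_flags;
	};

	/* Scan path: tag the probe request. */
	static void demo_tag_scan_frame(struct demo_cb *cb)
	{
		cb->control_flags |= DEMO_CTRL_SCAN_TX;
	}

	/* Rate control: ~0 means "no restriction". */
	static u32 demo_rate_mask(const struct demo_cb *cb, u32 user_mask)
	{
		return (cb->control_flags & DEMO_CTRL_SCAN_TX) ? ~0U : user_mask;
	}
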
@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	if (sctph->source != cp->vport || payload_csum ||
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
 		sctph->source = cp->vport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
 		sctph->dest = cp->dport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
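
For GSO SCTP skbs the CRC32c is filled in at segmentation time (or by NETIF_F_SCTP_CRC offload), so computing it here over the unsegmented super-skb would store a wrong checksum; both NAT handlers now skip the recompute in that case. Note the guard is De Morgan for "not (GSO && GSO-SCTP)". Reduced form, reusing the sctp_nat_csum() helper defined earlier in this file:

	#include <linux/skbuff.h>
	#include <linux/sctp.h>

	static void sctp_nat_csum(struct sk_buff *skb, struct sctphdr *sctph,
				  unsigned int sctphoff);	/* from this file */

	static void demo_update_csum(struct sk_buff *skb, struct sctphdr *sctph,
				     unsigned int sctphoff)
	{
		/* GSO-SCTP: CRC is fixed up when the skb is segmented */
		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
			sctp_nat_csum(skb, sctph, sctphoff);
	}
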
@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
 		return;

 	if (n > 1) {
-		nf_unregister_net_hook(ctx->net, &found->ops);
+		if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
+			nf_unregister_net_hook(ctx->net, &found->ops);
+
 		list_del_rcu(&found->list);
 		kfree_rcu(found, rcu);
 		return;
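
Hooks of a dormant table are not registered with the stack, so unregistering them from the netdev release path would unbalance hook registration; the handler now checks NFT_TABLE_F_DORMANT and still unlinks and frees the hook object either way. The invariant in reduced form (the demo_ types are illustrative):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	#define DEMO_TABLE_F_DORMANT	0x1

	struct demo_hook {
		struct list_head list;
		struct rcu_head rcu;
	};

	static void demo_unregister_hook(struct demo_hook *hook); /* hypothetical */

	static void demo_release_hook(unsigned int table_flags,
				      struct demo_hook *hook)
	{
		/* only a registered hook may be unregistered */
		if (!(table_flags & DEMO_TABLE_F_DORMANT))
			demo_unregister_hook(hook);

		/* teardown of the object happens unconditionally */
		list_del_rcu(&hook->list);
		kfree_rcu(hook, rcu);
	}
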
@@ -1593,9 +1593,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
 	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
 		struct hlist_head *head = &info->limits[i];
 		struct ovs_ct_limit *ct_limit;
+		struct hlist_node *next;

-		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
-					 lockdep_ovsl_is_held())
+		hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
 			kfree_rcu(ct_limit, rcu);
 	}
 	kfree(info->limits);
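
This runs at netns teardown where every entry is freed inside the loop; hlist_for_each_entry_rcu() re-reads the current node to advance after the body has already handed it to kfree_rcu(), while hlist_for_each_entry_safe() caches the next node first and is the iterator intended for destructive walks. Self-contained sketch:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_limit {
		struct hlist_node node;
		struct rcu_head rcu;
	};

	/* Free a whole bucket: cache ->next before the body may free
	 * the current entry.
	 */
	static void demo_bucket_free(struct hlist_head *head)
	{
		struct demo_limit *lim;
		struct hlist_node *next;

		hlist_for_each_entry_safe(lim, next, head, node)
			kfree_rcu(lim, rcu);
	}
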
@@ -215,7 +215,7 @@ static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)

 static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
 {
-	return ctx->strp.msg_ready;
+	return READ_ONCE(ctx->strp.msg_ready);
 }

 static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
@@ -360,7 +360,7 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
 	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
 		desc->count = 0;

-		strp->msg_ready = 1;
+		WRITE_ONCE(strp->msg_ready, 1);
 		tls_rx_msg_ready(strp);
 	}

@@ -528,7 +528,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
 	if (!tls_strp_check_queue_ok(strp))
 		return tls_strp_read_copy(strp, false);

-	strp->msg_ready = 1;
+	WRITE_ONCE(strp->msg_ready, 1);
 	tls_rx_msg_ready(strp);

 	return 0;
@@ -580,7 +580,7 @@ void tls_strp_msg_done(struct tls_strparser *strp)
 	else
 		tls_strp_flush_anchor_copy(strp);

-	strp->msg_ready = 0;
+	WRITE_ONCE(strp->msg_ready, 0);
 	memset(&strp->stm, 0, sizeof(strp->stm));

 	tls_strp_check_rcv(strp);
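
->poll() reads msg_ready without the socket lock, so the plain load is a data race the compiler may tear, fuse or re-read. Pairing READ_ONCE() at the lockless reader with WRITE_ONCE() at every writer makes the flag single-copy atomic; it adds no ordering beyond that, which the surrounding locking still provides. The idiom in isolation:

	#include <linux/compiler.h>
	#include <linux/types.h>

	struct demo_strp {
		u8 msg_ready;
	};

	/* Writer, under the owner's lock. */
	static void demo_set_ready(struct demo_strp *s)
	{
		WRITE_ONCE(s->msg_ready, 1);
	}

	/* Lockless reader (e.g. ->poll): READ_ONCE() keeps the compiler
	 * from tearing or re-reading the load.
	 */
	static bool demo_ready(const struct demo_strp *s)
	{
		return READ_ONCE(s->msg_ready);
	}
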
@@ -299,7 +299,7 @@ static void __unix_gc(struct work_struct *work)
 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

 			if (sk->sk_state == TCP_LISTEN) {
-				unix_state_lock(sk);
+				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
 				unix_state_unlock(sk);
 			}
 		}
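
The GC takes a listener's unix_state_lock while lockdep already tracks another lock of the same class in this context, which looks like recursive locking even though it cannot deadlock here. unix_state_lock_nested() with the dedicated U_LOCK_GC_LISTENER subclass tells lockdep the nesting is intentional; the annotation changes nothing at runtime. The generic shape of such an annotation:

	#include <linux/spinlock.h>

	enum { DEMO_LOCK_NORMAL, DEMO_LOCK_GC };	/* lockdep subclasses */

	static void demo_touch_from_gc(spinlock_t *lock)
	{
		/* same lock class, distinct subclass: no false splat */
		spin_lock_nested(lock, DEMO_LOCK_GC);
		spin_unlock(lock);
	}
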
@@ -14030,6 +14030,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
 error:
 	for (i = 0; i < new_coalesce.n_rules; i++) {
 		tmp_rule = &new_coalesce.rules[i];
+		if (!tmp_rule)
+			continue;
 		for (j = 0; j < tmp_rule->n_patterns; j++)
 			kfree(tmp_rule->patterns[j].mask);
 		kfree(tmp_rule->patterns);
@@ -1758,7 +1758,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,

 DECLARE_EVENT_CLASS(tx_rx_evt,
 	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
-	TP_ARGS(wiphy, rx, tx),
+	TP_ARGS(wiphy, tx, rx),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		__field(u32, tx)
@@ -1775,7 +1775,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,

 DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
 	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
-	TP_ARGS(wiphy, rx, tx)
+	TP_ARGS(wiphy, tx, rx)
 );

 DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
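
In DECLARE_EVENT_CLASS()/DEFINE_EVENT(), TP_ARGS() forwards the TP_PROTO() parameters positionally, so listing them as (wiphy, rx, tx) against a prototype of (wiphy, tx, rx) silently records the two antenna masks swapped. A plain-C analogy of the failure mode (runnable userspace code, not the tracepoint machinery itself):

	#include <stdio.h>

	static void record(unsigned int tx, unsigned int rx)
	{
		printf("tx=%u rx=%u\n", tx, rx);
	}

	static void set_antenna(unsigned int tx, unsigned int rx)
	{
		record(rx, tx);	/* bug: prints tx=2 rx=1 for (1, 2) */
		record(tx, rx);	/* fix: order matches the prototype */
	}

	int main(void)
	{
		set_antenna(1, 2);
		return 0;
	}
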
@@ -182,6 +182,7 @@ class NlMsg:
             self.done = 1
             extack_off = 20
         elif self.nl_type == Netlink.NLMSG_DONE:
+            self.error = struct.unpack("i", self.raw[0:4])[0]
             self.done = 1
             extack_off = 4
