ice: Remove enum ice_status

Replace uses of ice_status with the closest equivalent standard error
codes. Remove enum ice_status and its helper conversion function
(ice_status_to_errno) as they are no longer needed.

Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
This commit is contained in:
Tony Nguyen 2021-10-07 15:58:01 -07:00
parent 5e24d5984c
commit d54699e27d
22 changed files with 568 additions and 654 deletions

View File

@ -953,13 +953,13 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL); rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an /* if the disable queue command was exercised during an
* active reset flow, ICE_ERR_RESET_ONGOING is returned. * active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables * This is not an error as the reset operation disables
* queues at the hardware level anyway. * queues at the hardware level anyway.
*/ */
if (status == ICE_ERR_RESET_ONGOING) { if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) { } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) { } else if (status) {
dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",

View File

@ -2,7 +2,6 @@
/* Copyright (c) 2018, Intel Corporation. */ /* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h" #include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h" #include "ice_sched.h"
#include "ice_adminq_cmd.h" #include "ice_adminq_cmd.h"
#include "ice_flow.h" #include "ice_flow.h"
@ -19,7 +18,7 @@
static int ice_set_mac_type(struct ice_hw *hw) static int ice_set_mac_type(struct ice_hw *hw)
{ {
if (hw->vendor_id != PCI_VENDOR_ID_INTEL) if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
return ICE_ERR_DEVICE_NOT_SUPPORTED; return -ENODEV;
switch (hw->device_id) { switch (hw->device_id) {
case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_BACKPLANE:
@ -137,7 +136,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
cmd = &desc.params.mac_read; cmd = &desc.params.mac_read;
if (buf_size < sizeof(*resp)) if (buf_size < sizeof(*resp))
return ICE_ERR_BUF_TOO_SHORT; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
return ICE_ERR_CFG; return -EIO;
} }
/* A single port can report up to two (LAN and WoL) addresses */ /* A single port can report up to two (LAN and WoL) addresses */
@ -190,12 +189,12 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd = &desc.params.get_phy; cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG && if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw)) !ice_fw_supports_report_dflt_cfg(hw))
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@ -434,7 +433,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
u16 cmd_flags; u16 cmd_flags;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
li_old = &pi->phy.link_info_old; li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type; hw_media_type = &pi->phy.media_type;
@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd = &desc.params.set_mac_cfg; cmd = &desc.params.set_mac_cfg;
if (max_frame_size == 0) if (max_frame_size == 0)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
@ -590,7 +589,7 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
sw = hw->switch_info; sw = hw->switch_info;
if (!sw) if (!sw)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
INIT_LIST_HEAD(&sw->vsi_list_map_head); INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0; sw->prof_res_bm_init = 0;
@ -676,7 +675,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config) if (!config)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
@ -790,7 +789,7 @@ static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
sizeof(*data), sizeof(*data),
GFP_KERNEL); GFP_KERNEL);
if (!data) if (!data)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
val = i << ICE_AQC_FW_LOG_ID_S; val = i << ICE_AQC_FW_LOG_ID_S;
@ -956,7 +955,7 @@ int ice_init_hw(struct ice_hw *hw)
hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->port_info), GFP_KERNEL); sizeof(*hw->port_info), GFP_KERNEL);
if (!hw->port_info) { if (!hw->port_info) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll_cqinit; goto err_unroll_cqinit;
} }
@ -985,7 +984,7 @@ int ice_init_hw(struct ice_hw *hw)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) { if (!pcaps) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll_sched; goto err_unroll_sched;
} }
@ -1006,7 +1005,7 @@ int ice_init_hw(struct ice_hw *hw)
/* need a valid SW entry point to build a Tx tree */ /* need a valid SW entry point to build a Tx tree */
if (!hw->sw_entry_point_layer) { if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
status = ICE_ERR_CFG; status = -EIO;
goto err_unroll_sched; goto err_unroll_sched;
} }
INIT_LIST_HEAD(&hw->agg_list); INIT_LIST_HEAD(&hw->agg_list);
@ -1026,7 +1025,7 @@ int ice_init_hw(struct ice_hw *hw)
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) { if (!mac_buf) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct; goto err_unroll_fltr_mgmt_struct;
} }
@ -1116,7 +1115,7 @@ int ice_check_reset(struct ice_hw *hw)
if (cnt == grst_timeout) { if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED; return -EIO;
} }
#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
@ -1143,7 +1142,7 @@ int ice_check_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) { if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg); reg);
return ICE_ERR_RESET_FAILED; return -EIO;
} }
return 0; return 0;
@ -1169,7 +1168,7 @@ static int ice_pf_reset(struct ice_hw *hw)
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */ /* poll on global reset currently in progress until done */
if (ice_check_reset(hw)) if (ice_check_reset(hw))
return ICE_ERR_RESET_FAILED; return -EIO;
return 0; return 0;
} }
@ -1194,7 +1193,7 @@ static int ice_pf_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) { if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED; return -EIO;
} }
return 0; return 0;
@ -1228,7 +1227,7 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
val = GLGEN_RTRIG_GLOBR_M; val = GLGEN_RTRIG_GLOBR_M;
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
val |= rd32(hw, GLGEN_RTRIG); val |= rd32(hw, GLGEN_RTRIG);
@ -1253,10 +1252,10 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
u8 i; u8 i;
if (!ice_rxq_ctx) if (!ice_rxq_ctx)
return ICE_ERR_BAD_PTR; return -EINVAL;
if (rxq_index > QRX_CTRL_MAX_INDEX) if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM; return -EINVAL;
/* Copy each dword separately to HW */ /* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
@ -1313,7 +1312,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
if (!rlan_ctx) if (!rlan_ctx)
return ICE_ERR_BAD_PTR; return -EINVAL;
rlan_ctx->prefena = 1; rlan_ctx->prefena = 1;
@ -1369,9 +1368,8 @@ static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd) void *buf, u16 buf_size, struct ice_sq_cd *cd)
{ {
return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw), return ice_sq_send_cmd(hw, ice_get_sbq(hw),
(struct ice_aq_desc *)desc, (struct ice_aq_desc *)desc, buf, buf_size, cd);
buf, buf_size, cd));
} }
/** /**
@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) { if (buf) {
buf_cpy = kzalloc(buf_size, GFP_KERNEL); buf_cpy = kzalloc(buf_size, GFP_KERNEL);
if (!buf_cpy) if (!buf_cpy)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
memcpy(&desc_cpy, desc, sizeof(desc_cpy)); memcpy(&desc_cpy, desc, sizeof(desc_cpy));
@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
cmd = &desc.params.driver_ver; cmd = &desc.params.driver_ver;
if (!dv) if (!dv)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
@ -1654,12 +1652,12 @@ int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008). * Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can * When attempting to acquire the Global Config Lock, the driver can
* learn of three states: * learn of three states:
* 1) ICE_SUCCESS - acquired lock, and can perform download package * 1) 0 - acquired lock, and can perform download package
* 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load * 2) -EIO - did not get lock, driver should fail to load
* 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has * 3) -EALREADY - did not get lock, but another driver has
* successfully downloaded the package; the driver does * successfully downloaded the package; the driver does
* not have to download the package and can continue * not have to download the package and can continue
* loading * loading
* *
* Note that if the caller is in an acquire lock, perform action, release lock * Note that if the caller is in an acquire lock, perform action, release lock
* phase of operation, it is possible that the FW may detect a timeout and issue * phase of operation, it is possible that the FW may detect a timeout and issue
@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
} else if (le16_to_cpu(cmd_resp->status) == } else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) { ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout); *timeout = le32_to_cpu(cmd_resp->timeout);
return ICE_ERR_AQ_ERROR; return -EIO;
} else if (le16_to_cpu(cmd_resp->status) == } else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_DONE) { ICE_AQ_RES_GLBL_DONE) {
return ICE_ERR_AQ_NO_WORK; return -EALREADY;
} }
/* invalid FW response, force a timeout immediately */ /* invalid FW response, force a timeout immediately */
*timeout = 0; *timeout = 0;
return ICE_ERR_AQ_ERROR; return -EIO;
} }
/* If the resource is held by some other driver, the command completes /* If the resource is held by some other driver, the command completes
@ -1774,12 +1772,12 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has /* A return code of -EALREADY means that another driver has
* previously acquired the resource and performed any necessary updates; * previously acquired the resource and performed any necessary updates;
* in this case the caller does not obtain the resource and has no * in this case the caller does not obtain the resource and has no
* further work to do. * further work to do.
*/ */
if (status == ICE_ERR_AQ_NO_WORK) if (status == -EALREADY)
goto ice_acquire_res_exit; goto ice_acquire_res_exit;
if (status) if (status)
@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0; timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
if (status == ICE_ERR_AQ_NO_WORK) if (status == -EALREADY)
/* lock free, but no work to do */ /* lock free, but no work to do */
break; break;
@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* lock acquired */ /* lock acquired */
break; break;
} }
if (status && status != ICE_ERR_AQ_NO_WORK) if (status && status != -EALREADY)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit: ice_acquire_res_exit:
if (status == ICE_ERR_AQ_NO_WORK) { if (status == -EALREADY) {
if (access == ICE_RES_WRITE) if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else else
ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
} }
return status; return status;
} }
@ -1830,7 +1828,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/* there are some rare cases when trying to release the resource /* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly * results in an admin queue timeout, so handle them correctly
*/ */
while ((status == ICE_ERR_AQ_TIMEOUT) && while ((status == -EIO) &&
(total_delay < hw->adminq.sq_cmd_timeout)) { (total_delay < hw->adminq.sq_cmd_timeout)) {
mdelay(1); mdelay(1);
status = ice_aq_release_res(hw, res, 0, NULL); status = ice_aq_release_res(hw, res, 0, NULL);
@ -1860,10 +1858,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
cmd = &desc.params.sw_res_ctrl; cmd = &desc.params.sw_res_ctrl;
if (!buf) if (!buf)
return ICE_ERR_PARAM; return -EINVAL;
if (buf_size < flex_array_size(buf, elem, num_entries)) if (buf_size < flex_array_size(buf, elem, num_entries))
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc); ice_fill_dflt_direct_cmd_desc(&desc, opc);
@ -1892,7 +1890,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
buf_len = struct_size(buf, elem, num); buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL); buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Prepare buffer to allocate resource. */ /* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num); buf->num_elems = cpu_to_le16(num);
@ -1929,7 +1927,7 @@ int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
buf_len = struct_size(buf, elem, num); buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL); buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Prepare buffer to free resource. */ /* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num); buf->num_elems = cpu_to_le16(num);
@ -2498,7 +2496,7 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
if (opc != ice_aqc_opc_list_func_caps && if (opc != ice_aqc_opc_list_func_caps &&
opc != ice_aqc_opc_list_dev_caps) opc != ice_aqc_opc_list_dev_caps)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc); ice_fill_dflt_direct_cmd_desc(&desc, opc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@ -2526,7 +2524,7 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf) if (!cbuf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the /* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum * device will return, we can simply send a 4KB buffer, the maximum
@ -2560,7 +2558,7 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf) if (!cbuf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the /* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum * device will return, we can simply send a 4KB buffer, the maximum
@ -2911,7 +2909,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
int status; int status;
if (!cfg) if (!cfg)
return ICE_ERR_PARAM; return -EINVAL;
/* Ensure that only valid bits of cfg->caps can be turned on. */ /* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
@ -2958,7 +2956,7 @@ int ice_update_link_info(struct ice_port_info *pi)
int status; int status;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
li = &pi->phy.link_info; li = &pi->phy.link_info;
@ -2974,7 +2972,7 @@ int ice_update_link_info(struct ice_port_info *pi)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL); GFP_KERNEL);
if (!pcaps) if (!pcaps)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL); pcaps, NULL);
@ -3078,7 +3076,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
u8 pause_mask = 0x0; u8 pause_mask = 0x0;
if (!pi || !cfg) if (!pi || !cfg)
return ICE_ERR_BAD_PTR; return -EINVAL;
switch (req_mode) { switch (req_mode) {
case ICE_FC_FULL: case ICE_FC_FULL:
@ -3126,14 +3124,14 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
struct ice_hw *hw; struct ice_hw *hw;
if (!pi || !aq_failures) if (!pi || !aq_failures)
return ICE_ERR_BAD_PTR; return -EINVAL;
*aq_failures = 0; *aq_failures = 0;
hw = pi->hw; hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) if (!pcaps)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Get the current PHY config */ /* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
@ -3267,13 +3265,13 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
struct ice_hw *hw; struct ice_hw *hw;
if (!pi || !cfg) if (!pi || !cfg)
return ICE_ERR_BAD_PTR; return -EINVAL;
hw = pi->hw; hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) if (!pcaps)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ? (ice_fw_supports_report_dflt_cfg(hw) ?
@ -3313,7 +3311,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cfg->link_fec_opt |= pcaps->link_fec_options; cfg->link_fec_opt |= pcaps->link_fec_options;
break; break;
default: default:
status = ICE_ERR_PARAM; status = -EINVAL;
break; break;
} }
@ -3350,7 +3348,7 @@ int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
int status = 0; int status = 0;
if (!pi || !link_up) if (!pi || !link_up)
return ICE_ERR_PARAM; return -EINVAL;
phy_info = &pi->phy; phy_info = &pi->phy;
@ -3498,7 +3496,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
int status; int status;
if (!data || (mem_addr & 0xff00)) if (!data || (mem_addr & 0xff00))
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param; cmd = &desc.params.read_write_sff_param;
@ -3537,13 +3535,13 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
u8 *lut; u8 *lut;
if (!params) if (!params)
return ICE_ERR_PARAM; return -EINVAL;
vsi_handle = params->vsi_handle; vsi_handle = params->vsi_handle;
lut = params->lut; lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
return ICE_ERR_PARAM; return -EINVAL;
lut_size = params->lut_size; lut_size = params->lut_size;
lut_type = params->lut_type; lut_type = params->lut_type;
@ -3572,7 +3570,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break; break;
default: default:
status = ICE_ERR_PARAM; status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit; goto ice_aq_get_set_rss_lut_exit;
} }
@ -3607,7 +3605,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
} }
fallthrough; fallthrough;
default: default:
status = ICE_ERR_PARAM; status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit; goto ice_aq_get_set_rss_lut_exit;
} }
@ -3692,7 +3690,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key) struct ice_aqc_get_set_rss_keys *key)
{ {
if (!ice_is_vsi_valid(hw, vsi_handle) || !key) if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
return ICE_ERR_PARAM; return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
key, false); key, false);
@ -3711,7 +3709,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys) struct ice_aqc_get_set_rss_keys *keys)
{ {
if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
return ICE_ERR_PARAM; return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
keys, true); keys, true);
@ -3753,10 +3751,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
if (!qg_list) if (!qg_list)
return ICE_ERR_PARAM; return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM; return -EINVAL;
for (i = 0, list = qg_list; i < num_qgrps; i++) { for (i = 0, list = qg_list; i < num_qgrps; i++) {
sum_size += struct_size(list, txqs, list->num_txqs); sum_size += struct_size(list, txqs, list->num_txqs);
@ -3765,7 +3763,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
} }
if (buf_size != sum_size) if (buf_size != sum_size)
return ICE_ERR_PARAM; return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@ -3803,10 +3801,10 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
/* qg_list can be NULL only in VM/VF reset flow */ /* qg_list can be NULL only in VM/VF reset flow */
if (!qg_list && !rst_src) if (!qg_list && !rst_src)
return ICE_ERR_PARAM; return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM; return -EINVAL;
cmd->num_entries = num_qgrps; cmd->num_entries = num_qgrps;
@ -3855,7 +3853,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
} }
if (buf_size != sz) if (buf_size != sz)
return ICE_ERR_PARAM; return -EINVAL;
do_aq: do_aq:
status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
@ -3913,8 +3911,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
cmd->num_qset_grps = num_qset_grps; cmd->num_qset_grps = num_qset_grps;
return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
buf_size, cd));
} }
/* End of FW Admin Queue command wrappers */ /* End of FW Admin Queue command wrappers */
@ -4140,7 +4137,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
break; break;
default: default:
return ICE_ERR_INVAL_SIZE; return -EINVAL;
} }
} }
@ -4196,15 +4193,15 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_hw *hw; struct ice_hw *hw;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG; return -EIO;
if (num_qgrps > 1 || buf->num_txqs > 1) if (num_qgrps > 1 || buf->num_txqs > 1)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
hw = pi->hw; hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&pi->sched_lock); mutex_lock(&pi->sched_lock);
@ -4212,7 +4209,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
if (!q_ctx) { if (!q_ctx) {
ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
q_handle); q_handle);
status = ICE_ERR_PARAM; status = -EINVAL;
goto ena_txq_exit; goto ena_txq_exit;
} }
@ -4220,7 +4217,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN); ICE_SCHED_NODE_OWNER_LAN);
if (!parent) { if (!parent) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto ena_txq_exit; goto ena_txq_exit;
} }
@ -4295,14 +4292,14 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd) struct ice_sq_cd *cd)
{ {
int status = ICE_ERR_DOES_NOT_EXIST; int status = -ENOENT;
struct ice_aqc_dis_txq_item *qg_list; struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx; struct ice_q_ctx *q_ctx;
struct ice_hw *hw; struct ice_hw *hw;
u16 i, buf_size; u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG; return -EIO;
hw = pi->hw; hw = pi->hw;
@ -4314,13 +4311,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (rst_src) if (rst_src)
return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL); vmvf_num, NULL);
return ICE_ERR_CFG; return -EIO;
} }
buf_size = struct_size(qg_list, q_id, 1); buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL); qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list) if (!qg_list)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
mutex_lock(&pi->sched_lock); mutex_lock(&pi->sched_lock);
@ -4375,10 +4372,10 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u8 i; u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG; return -EIO;
if (!ice_is_vsi_valid(pi->hw, vsi_handle)) if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&pi->sched_lock); mutex_lock(&pi->sched_lock);
@ -4427,9 +4424,8 @@ int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs) u16 *max_rdmaqs)
{ {
return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
max_rdmaqs, ICE_SCHED_NODE_OWNER_RDMA);
ICE_SCHED_NODE_OWNER_RDMA));
} }
/** /**
@ -4504,7 +4500,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
&node); &node);
if (status) { if (status) {
ret = ice_status_to_errno(status); ret = status;
break; break;
} }
qset_teid[i] = le32_to_cpu(node.node_teid); qset_teid[i] = le32_to_cpu(node.node_teid);
@ -4567,7 +4563,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
mutex_unlock(&pi->sched_lock); mutex_unlock(&pi->sched_lock);
kfree(qg_list); kfree(qg_list);
return ice_status_to_errno(status); return status;
} }
/** /**
@ -4608,7 +4604,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
int status; int status;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
/* Replay pre-initialization if there is any */ /* Replay pre-initialization if there is any */
if (vsi_handle == ICE_MAIN_VSI_HANDLE) { if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
@ -4774,7 +4770,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
cmd->param_indx = idx; cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value); cmd->param_val = cpu_to_le32(value);
return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
} }
/** /**
@ -4809,7 +4805,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status) if (status)
return ice_status_to_errno(status); return status;
*value = le32_to_cpu(cmd->param_val); *value = le32_to_cpu(cmd->param_val);
@ -4839,7 +4835,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
cmd->gpio_num = pin_idx; cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0; cmd->gpio_val = value ? 1 : 0;
return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
} }
/** /**
@ -4868,7 +4864,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status) if (status)
return ice_status_to_errno(status); return status;
*value = !!cmd->gpio_val; *value = !!cmd->gpio_val;
return 0; return 0;
@ -5003,7 +4999,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
cmd = &desc.params.lldp_set_mib; cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf) if (buf_size == 0 || !buf)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

View File

@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa, &cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va) if (!cq->sq.desc_buf.va)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
cq->sq.desc_buf.size = size; cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL; cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0; cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0; cq->sq.desc_buf.size = 0;
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
return 0; return 0;
@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa, &cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va) if (!cq->rq.desc_buf.va)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
cq->rq.desc_buf.size = size; cq->rq.desc_buf.size = size;
return 0; return 0;
} }
@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL); sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head) if (!cq->rq.dma_head)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */ /* allocate the mapped buffers */
@ -218,7 +218,7 @@ unwind_alloc_rq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL; cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL); sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head) if (!cq->sq.dma_head)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */ /* allocate the mapped buffers */
@ -266,7 +266,7 @@ unwind_alloc_sq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL; cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
static int static int
@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */ /* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
return ICE_ERR_AQ_ERROR; return -EIO;
return 0; return 0;
} }
@ -367,13 +367,13 @@ static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->sq.count > 0) { if (cq->sq.count > 0) {
/* queue already initialized */ /* queue already initialized */
ret_code = ICE_ERR_NOT_READY; ret_code = -EBUSY;
goto init_ctrlq_exit; goto init_ctrlq_exit;
} }
/* verify input for valid configuration */ /* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) { if (!cq->num_sq_entries || !cq->sq_buf_size) {
ret_code = ICE_ERR_CFG; ret_code = -EIO;
goto init_ctrlq_exit; goto init_ctrlq_exit;
} }
@ -427,13 +427,13 @@ static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq.count > 0) { if (cq->rq.count > 0) {
/* queue already initialized */ /* queue already initialized */
ret_code = ICE_ERR_NOT_READY; ret_code = -EBUSY;
goto init_ctrlq_exit; goto init_ctrlq_exit;
} }
/* verify input for valid configuration */ /* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) { if (!cq->num_rq_entries || !cq->rq_buf_size) {
ret_code = ICE_ERR_CFG; ret_code = -EIO;
goto init_ctrlq_exit; goto init_ctrlq_exit;
} }
@ -482,7 +482,7 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
mutex_lock(&cq->sq_lock); mutex_lock(&cq->sq_lock);
if (!cq->sq.count) { if (!cq->sq.count) {
ret_code = ICE_ERR_NOT_READY; ret_code = -EBUSY;
goto shutdown_sq_out; goto shutdown_sq_out;
} }
@ -549,7 +549,7 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
mutex_lock(&cq->rq_lock); mutex_lock(&cq->rq_lock);
if (!cq->rq.count) { if (!cq->rq.count) {
ret_code = ICE_ERR_NOT_READY; ret_code = -EBUSY;
goto shutdown_rq_out; goto shutdown_rq_out;
} }
@ -586,7 +586,7 @@ static int ice_init_check_adminq(struct ice_hw *hw)
goto init_ctrlq_free_rq; goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) { if (!ice_aq_ver_check(hw)) {
status = ICE_ERR_FW_API_VER; status = -EIO;
goto init_ctrlq_free_rq; goto init_ctrlq_free_rq;
} }
@ -631,14 +631,14 @@ static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq; cq = &hw->mailboxq;
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
cq->qtype = q_type; cq->qtype = q_type;
/* verify input for valid configuration */ /* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries || if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) { !cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG; return -EIO;
} }
/* setup SQ command write back timeout */ /* setup SQ command write back timeout */
@ -763,7 +763,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
return status; return status;
status = ice_init_check_adminq(hw); status = ice_init_check_adminq(hw);
if (status != ICE_ERR_AQ_FW_CRITICAL) if (status != -EIO)
break; break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
@ -978,19 +978,19 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* if reset is in progress return a soft error */ /* if reset is in progress return a soft error */
if (hw->reset_ongoing) if (hw->reset_ongoing)
return ICE_ERR_RESET_ONGOING; return -EBUSY;
mutex_lock(&cq->sq_lock); mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK; cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) { if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
status = ICE_ERR_AQ_EMPTY; status = -EIO;
goto sq_send_command_error; goto sq_send_command_error;
} }
if ((buf && !buf_size) || (!buf && buf_size)) { if ((buf && !buf_size) || (!buf && buf_size)) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto sq_send_command_error; goto sq_send_command_error;
} }
@ -998,7 +998,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) { if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size); buf_size);
status = ICE_ERR_INVAL_SIZE; status = -EINVAL;
goto sq_send_command_error; goto sq_send_command_error;
} }
@ -1011,7 +1011,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) { if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val); val);
status = ICE_ERR_AQ_EMPTY; status = -EIO;
goto sq_send_command_error; goto sq_send_command_error;
} }
@ -1028,7 +1028,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/ */
if (ice_clean_sq(hw, cq) == 0) { if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
status = ICE_ERR_AQ_FULL; status = -ENOSPC;
goto sq_send_command_error; goto sq_send_command_error;
} }
@ -1082,7 +1082,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) { if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size); copy_size, buf_size);
status = ICE_ERR_AQ_ERROR; status = -EIO;
} else { } else {
memcpy(buf, dma_buf->va, copy_size); memcpy(buf, dma_buf->va, copy_size);
} }
@ -1098,7 +1098,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
} }
cmd_completed = true; cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK) if (!status && retval != ICE_AQ_RC_OK)
status = ICE_ERR_AQ_ERROR; status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval; cq->sq_last_status = (enum ice_aq_err)retval;
} }
@ -1116,10 +1116,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
status = ICE_ERR_AQ_FW_CRITICAL; status = -EIO;
} else { } else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT; status = -EIO;
} }
} }
@ -1176,7 +1176,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) { if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
ret_code = ICE_ERR_AQ_EMPTY; ret_code = -EIO;
goto clean_rq_elem_err; goto clean_rq_elem_err;
} }
@ -1185,7 +1185,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) { if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */ /* nothing to do - shouldn't need to update ring's values */
ret_code = ICE_ERR_AQ_NO_WORK; ret_code = -EALREADY;
goto clean_rq_elem_out; goto clean_rq_elem_out;
} }
@ -1196,7 +1196,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags); flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) { if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR; ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status); le16_to_cpu(desc->opcode), rq_last_status);
} }

View File

@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */ /* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h" #include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h" #include "ice_sched.h"
#include "ice_dcb.h" #include "ice_dcb.h"
@ -31,7 +30,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
cmd = &desc.params.lldp_get_mib; cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf) if (buf_size == 0 || !buf)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@ -609,7 +608,7 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
u16 len; u16 len;
if (!lldpmib || !dcbcfg) if (!lldpmib || !dcbcfg)
return ICE_ERR_PARAM; return -EINVAL;
/* set to the start of LLDPDU */ /* set to the start of LLDPDU */
lldpmib += ETH_HLEN; lldpmib += ETH_HLEN;
@ -659,7 +658,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
/* Allocate the LLDPDU */ /* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib) if (!lldpmib)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL); ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@ -684,7 +683,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL * @cd: pointer to command details structure or NULL
* *
* Start/Stop the embedded dcbx Agent. In case that this wrapper function * Start/Stop the embedded dcbx Agent. In case that this wrapper function
* returns ICE_SUCCESS, caller will need to check if FW returns back the same * returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09) * value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/ */
int int
@ -762,7 +761,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status) if (status)
return ice_status_to_errno(status); return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
@ -910,7 +909,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
int ret; int ret;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE) if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@ -950,7 +949,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
int ret; int ret;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) { if (!ret) {
@ -980,7 +979,7 @@ int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
int ret = 0; int ret = 0;
if (!hw->func_caps.common_cap.dcb) if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED; return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true; qos_cfg->is_sw_lldp = true;
@ -996,7 +995,7 @@ int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret; return ret;
qos_cfg->is_sw_lldp = false; qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY; return -EBUSY;
} }
/* Configure the LLDP MIB change event */ /* Configure the LLDP MIB change event */
@ -1022,13 +1021,13 @@ int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
int ret; int ret;
if (!hw->func_caps.common_cap.dcb) if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED; return -EOPNOTSUPP;
/* Get DCBX status */ /* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw); qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY; return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret) if (!ret)
@ -1478,7 +1477,7 @@ int ice_set_dcb_cfg(struct ice_port_info *pi)
u16 miblen; u16 miblen;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
@ -1487,7 +1486,7 @@ int ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */ /* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib) if (!lldpmib)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@ -1521,7 +1520,7 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
int status; int status;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.port_ets; cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid; cmd->port_teid = pi->root->info.node_teid;
@ -1548,7 +1547,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
u8 i, j; u8 i, j;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
/* suspend the missing TC nodes */ /* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) { for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);

View File

@ -584,19 +584,19 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
TNL_SEG_CNT(tun), &prof); TNL_SEG_CNT(tun), &prof);
if (status) if (status)
return ice_status_to_errno(status); return status;
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL, main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h); seg, &entry1_h);
if (status) { if (status) {
err = ice_status_to_errno(status); err = status;
goto err_prof; goto err_prof;
} }
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h); seg, &entry2_h);
if (status) { if (status) {
err = ice_status_to_errno(status); err = status;
goto err_entry; goto err_entry;
} }
@ -1211,7 +1211,7 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
ice_fdir_get_prgm_desc(hw, input, &desc, add); ice_fdir_get_prgm_desc(hw, input, &desc, add);
status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (status) { if (status) {
err = ice_status_to_errno(status); err = status;
goto err_free_all; goto err_free_all;
} }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@ -1226,7 +1226,7 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
is_tun); is_tun);
if (status) { if (status) {
err = ice_status_to_errno(status); err = status;
goto err_frag; goto err_frag;
} }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt); err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);

View File

@ -919,15 +919,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow) if (ice_fdir_pkt[idx].flow == flow)
break; break;
if (idx == ICE_FDIR_NUM_PKT) if (idx == ICE_FDIR_NUM_PKT)
return ICE_ERR_PARAM; return -EINVAL;
if (!tun) { if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt; loc = pkt;
} else { } else {
if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL)) if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt) if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM; return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len); ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
@ -1111,7 +1111,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
if (input->flex_fltr) if (input->flex_fltr)

View File

@ -440,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
memset(&state, 0, sizeof(state)); memset(&state, 0, sizeof(state));
if (!ice_seg) if (!ice_seg)
return ICE_ERR_PARAM; return -EINVAL;
do { do {
tcam = ice_pkg_enum_entry(ice_seg, &state, tcam = ice_pkg_enum_entry(ice_seg, &state,
@ -455,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
} while (tcam); } while (tcam);
*entry = NULL; *entry = NULL;
return ICE_ERR_CFG; return -EIO;
} }
/** /**
@ -630,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
/* 'dont_care' and 'nvr_mtch' masks cannot overlap */ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
return ICE_ERR_CFG; return -EIO;
*key = 0; *key = 0;
*key_inv = 0; *key_inv = 0;
@ -732,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
/* size must be a multiple of 2 bytes. */ /* size must be a multiple of 2 bytes. */
if (size % 2) if (size % 2)
return ICE_ERR_CFG; return -EIO;
half_size = size / 2; half_size = size / 2;
if (off + len > half_size) if (off + len > half_size)
return ICE_ERR_CFG; return -EIO;
/* Make sure at most one bit is set in the never match mask. Having more /* Make sure at most one bit is set in the never match mask. Having more
* than one never match mask bit set will cause HW to consume excessive * than one never match mask bit set will cause HW to consume excessive
@ -744,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
*/ */
#define ICE_NVR_MTCH_BITS_MAX 1 #define ICE_NVR_MTCH_BITS_MAX 1
if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
return ICE_ERR_CFG; return -EIO;
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
dc ? dc[i] : 0, nm ? nm[i] : 0, dc ? dc[i] : 0, nm ? nm[i] : 0,
key + off + i, key + half_size + off + i)) key + off + i, key + half_size + off + i))
return ICE_ERR_CFG; return -EIO;
return 0; return 0;
} }
@ -764,12 +764,12 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
* or writing of the package. When attempting to obtain write access, the * or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values: * caller must check for the following two return values:
* *
* ICE_SUCCESS - Means the caller has acquired the global config lock * 0 - Means the caller has acquired the global config lock
* and can perform writing of the package. * and can perform writing of the package.
* ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the * -EALREADY - Indicates another driver has already written the
* package or has found that no update was necessary; in * package or has found that no update was necessary; in
* this case, the caller can just skip performing any * this case, the caller can just skip performing any
* update of the package. * update of the package.
*/ */
static int static int
ice_acquire_global_cfg_lock(struct ice_hw *hw, ice_acquire_global_cfg_lock(struct ice_hw *hw,
@ -782,7 +782,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw,
if (!status) if (!status)
mutex_lock(&ice_global_cfg_lock_sw); mutex_lock(&ice_global_cfg_lock_sw);
else if (status == ICE_ERR_AQ_NO_WORK) else if (status == -EALREADY)
ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status; return status;
@ -859,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
if (status == ICE_ERR_AQ_ERROR) { if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */ /* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp; struct ice_aqc_download_pkg_resp *resp;
@ -907,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
if (status == ICE_ERR_AQ_ERROR) { if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */ /* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp; struct ice_aqc_download_pkg_resp *resp;
@ -1041,7 +1041,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) { if (status) {
if (status == ICE_ERR_AQ_NO_WORK) if (status == -EALREADY)
return ICE_DDP_PKG_ALREADY_LOADED; return ICE_DDP_PKG_ALREADY_LOADED;
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
} }
@ -1469,7 +1469,7 @@ static int ice_get_prof_index_max(struct ice_hw *hw)
memset(&state, 0, sizeof(state)); memset(&state, 0, sizeof(state));
if (!hw->seg) if (!hw->seg)
return ICE_ERR_PARAM; return -EINVAL;
ice_seg = hw->seg; ice_seg = hw->seg;
@ -1795,7 +1795,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state)); memset(&state, 0, sizeof(state));
if (!ids_cnt || !hw->seg) if (!ids_cnt || !hw->seg)
return ICE_ERR_PARAM; return -EINVAL;
ice_seg = hw->seg; ice_seg = hw->seg;
do { do {
@ -1839,7 +1839,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
} }
} while (fv); } while (fv);
if (list_empty(fv_list)) if (list_empty(fv_list))
return ICE_ERR_CFG; return -EIO;
return 0; return 0;
err: err:
@ -1848,7 +1848,7 @@ err:
devm_kfree(ice_hw_to_dev(hw), fvl); devm_kfree(ice_hw_to_dev(hw), fvl);
} }
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
@ -1924,17 +1924,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
u16 data_end; u16 data_end;
if (!bld) if (!bld)
return ICE_ERR_PARAM; return -EINVAL;
buf = (struct ice_buf_hdr *)&bld->buf; buf = (struct ice_buf_hdr *)&bld->buf;
/* already an active section, can't increase table size */ /* already an active section, can't increase table size */
section_count = le16_to_cpu(buf->section_count); section_count = le16_to_cpu(buf->section_count);
if (section_count > 0) if (section_count > 0)
return ICE_ERR_CFG; return -EIO;
if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
return ICE_ERR_CFG; return -EIO;
bld->reserved_section_table_entries += count; bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) + data_end = le16_to_cpu(buf->data_end) +
@ -2101,14 +2101,14 @@ ice_create_tunnel(struct ice_hw *hw, u16 index,
enum ice_tunnel_type type, u16 port) enum ice_tunnel_type type, u16 port)
{ {
struct ice_boost_tcam_section *sect_rx, *sect_tx; struct ice_boost_tcam_section *sect_rx, *sect_tx;
int status = ICE_ERR_MAX_LIMIT; int status = -ENOSPC;
struct ice_buf_build *bld; struct ice_buf_build *bld;
mutex_lock(&hw->tnl_lock); mutex_lock(&hw->tnl_lock);
bld = ice_pkg_buf_alloc(hw); bld = ice_pkg_buf_alloc(hw);
if (!bld) { if (!bld) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto ice_create_tunnel_end; goto ice_create_tunnel_end;
} }
@ -2172,7 +2172,7 @@ ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
u16 port) u16 port)
{ {
struct ice_boost_tcam_section *sect_rx, *sect_tx; struct ice_boost_tcam_section *sect_rx, *sect_tx;
int status = ICE_ERR_MAX_LIMIT; int status = -ENOSPC;
struct ice_buf_build *bld; struct ice_buf_build *bld;
mutex_lock(&hw->tnl_lock); mutex_lock(&hw->tnl_lock);
@ -2180,13 +2180,13 @@ ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
if (WARN_ON(!hw->tnl.tbl[index].valid || if (WARN_ON(!hw->tnl.tbl[index].valid ||
hw->tnl.tbl[index].type != type || hw->tnl.tbl[index].type != type ||
hw->tnl.tbl[index].port != port)) { hw->tnl.tbl[index].port != port)) {
status = ICE_ERR_OUT_OF_RANGE; status = -EIO;
goto ice_destroy_tunnel_end; goto ice_destroy_tunnel_end;
} }
bld = ice_pkg_buf_alloc(hw); bld = ice_pkg_buf_alloc(hw);
if (!bld) { if (!bld) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto ice_destroy_tunnel_end; goto ice_destroy_tunnel_end;
} }
@ -2289,10 +2289,10 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
struct ice_fv_word *fv_ext; struct ice_fv_word *fv_ext;
if (prof >= hw->blk[blk].es.count) if (prof >= hw->blk[blk].es.count)
return ICE_ERR_PARAM; return -EINVAL;
if (fv_idx >= hw->blk[blk].es.fvw) if (fv_idx >= hw->blk[blk].es.fvw)
return ICE_ERR_PARAM; return -EINVAL;
fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
@ -2319,7 +2319,7 @@ static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{ {
if (ptype >= ICE_XLT1_CNT || !ptg) if (ptype >= ICE_XLT1_CNT || !ptg)
return ICE_ERR_PARAM; return -EINVAL;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0; return 0;
@ -2356,14 +2356,14 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
struct ice_ptg_ptype *p; struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1) if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM; return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Should not happen if .in_use is set, bad config */ /* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
return ICE_ERR_CFG; return -EIO;
/* find the ptype within this PTG, and bypass the link over it */ /* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
@ -2403,10 +2403,10 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
u8 original_ptg; u8 original_ptg;
if (ptype > ICE_XLT1_CNT - 1) if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM; return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status) if (status)
@ -2545,7 +2545,7 @@ static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{ {
if (!vsig || vsi >= ICE_MAX_VSI) if (!vsig || vsi >= ICE_MAX_VSI)
return ICE_ERR_PARAM; return -EINVAL;
/* As long as there's a default or valid VSIG associated with the input /* As long as there's a default or valid VSIG associated with the input
* VSI, the functions returns a success. Any handling of VSIG will be * VSI, the functions returns a success. Any handling of VSIG will be
@ -2624,7 +2624,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
return 0; return 0;
} }
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**
@ -2645,10 +2645,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M; idx = vsig & ICE_VSIG_IDX_M;
if (idx >= ICE_MAX_VSIGS) if (idx >= ICE_MAX_VSIGS)
return ICE_ERR_PARAM; return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
@ -2706,10 +2706,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M; idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
return ICE_ERR_PARAM; return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* entry already in default VSIG, don't have to remove */ /* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG) if (idx == ICE_DEFAULT_VSIG)
@ -2717,7 +2717,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head)) if (!(*vsi_head))
return ICE_ERR_CFG; return -EIO;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head); vsi_cur = (*vsi_head);
@ -2734,7 +2734,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* verify if VSI was removed from group list */ /* verify if VSI was removed from group list */
if (!vsi_cur) if (!vsi_cur)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1; vsi_cur->changed = 1;
@ -2765,14 +2765,14 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M; idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
return ICE_ERR_PARAM; return -EINVAL;
/* if VSIG not in use and VSIG is not default type this VSIG /* if VSIG not in use and VSIG is not default type this VSIG
* doesn't exist. * doesn't exist.
*/ */
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG) vsig != ICE_DEFAULT_VSIG)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status) if (status)
@ -2889,7 +2889,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
* field vector and mask. This will cause rule interference. * field vector and mask. This will cause rule interference.
*/ */
if (blk == ICE_BLK_FD) if (blk == ICE_BLK_FD)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
for (i = 0; i < (u8)es->count; i++) { for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw; u16 off = i * es->fvw;
@ -2905,7 +2905,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
return 0; return 0;
} }
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**
@ -2965,7 +2965,7 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 res_type; u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type)) if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM; return -EINVAL;
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
} }
@ -2984,7 +2984,7 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
u16 res_type; u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type)) if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM; return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tcam_idx); return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
} }
@ -3006,7 +3006,7 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
u16 get_prof; u16 get_prof;
if (!ice_prof_id_rsrc_type(blk, &res_type)) if (!ice_prof_id_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM; return -EINVAL;
status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
if (!status) if (!status)
@ -3030,7 +3030,7 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
u16 res_type; u16 res_type;
if (!ice_prof_id_rsrc_type(blk, &res_type)) if (!ice_prof_id_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM; return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
} }
@ -3045,7 +3045,7 @@ static int
ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{ {
if (prof_id > hw->blk[blk].es.count) if (prof_id > hw->blk[blk].es.count)
return ICE_ERR_PARAM; return -EINVAL;
hw->blk[blk].es.ref_count[prof_id]++; hw->blk[blk].es.ref_count[prof_id]++;
@ -3167,12 +3167,12 @@ ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx) u16 *mask_idx)
{ {
bool found_unused = false, found_copy = false; bool found_unused = false, found_copy = false;
int status = ICE_ERR_MAX_LIMIT; int status = -ENOSPC;
u16 unused_idx = 0, copy_idx = 0; u16 unused_idx = 0, copy_idx = 0;
u16 i; u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&hw->blk[blk].masks.lock); mutex_lock(&hw->blk[blk].masks.lock);
@ -3234,11 +3234,11 @@ static int
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{ {
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM; return -EINVAL;
if (!(mask_idx >= hw->blk[blk].masks.first && if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
mutex_lock(&hw->blk[blk].masks.lock); mutex_lock(&hw->blk[blk].masks.lock);
@ -3279,7 +3279,7 @@ ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
u16 i; u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM; return -EINVAL;
mask_bm = hw->blk[blk].es.mask_ena[prof_id]; mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
@ -3364,7 +3364,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
if (ena_mask & BIT(i)) if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i); ice_free_prof_mask(hw, blk, i);
return ICE_ERR_OUT_OF_RANGE; return -EIO;
} }
/* enable the masks for this profile */ /* enable the masks for this profile */
@ -3410,7 +3410,7 @@ static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{ {
if (prof_id > hw->blk[blk].es.count) if (prof_id > hw->blk[blk].es.count)
return ICE_ERR_PARAM; return -EINVAL;
if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) { if (!--hw->blk[blk].es.ref_count[prof_id]) {
@ -3967,7 +3967,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
err: err:
ice_free_hw_tbls(hw); ice_free_hw_tbls(hw);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
@ -4075,7 +4075,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
*refs = 0; *refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
while (ptr) { while (ptr) {
@ -4136,7 +4136,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
sizeof(p->es[0])); sizeof(p->es[0]));
if (!p) if (!p)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
p->count = cpu_to_le16(1); p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->prof_id); p->offset = cpu_to_le16(tmp->prof_id);
@ -4170,7 +4170,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct_size(p, entry, 1)); struct_size(p, entry, 1));
if (!p) if (!p)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
p->count = cpu_to_le16(1); p->count = cpu_to_le16(1);
p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
@ -4206,7 +4206,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1)); struct_size(p, value, 1));
if (!p) if (!p)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
p->count = cpu_to_le16(1); p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->ptype); p->offset = cpu_to_le16(tmp->ptype);
@ -4241,7 +4241,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1)); struct_size(p, value, 1));
if (!p) if (!p)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
p->count = cpu_to_le16(1); p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->vsi); p->offset = cpu_to_le16(tmp->vsi);
@ -4304,7 +4304,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
/* Build update package buffer */ /* Build update package buffer */
b = ice_pkg_buf_alloc(hw); b = ice_pkg_buf_alloc(hw);
if (!b) if (!b)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_pkg_buf_reserve_section(b, sects); status = ice_pkg_buf_reserve_section(b, sects);
if (status) if (status)
@ -4341,13 +4341,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
*/ */
pkg_sects = ice_pkg_buf_get_active_sections(b); pkg_sects = ice_pkg_buf_get_active_sections(b);
if (!pkg_sects || pkg_sects != sects) { if (!pkg_sects || pkg_sects != sects) {
status = ICE_ERR_INVAL_SIZE; status = -EINVAL;
goto error_tmp; goto error_tmp;
} }
/* update package */ /* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1); status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
if (status == ICE_ERR_AQ_ERROR) if (status == -EIO)
ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp: error_tmp:
@ -4468,7 +4468,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
/* check for room */ /* check for room */
if (first_free + 1 < (s8)ice_fd_pairs[index].count) if (first_free + 1 < (s8)ice_fd_pairs[index].count)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
/* place in extraction sequence */ /* place in extraction sequence */
for (k = 0; k < ice_fd_pairs[index].count; k++) { for (k = 0; k < ice_fd_pairs[index].count; k++) {
@ -4478,7 +4478,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
ice_fd_pairs[index].off + (k * 2); ice_fd_pairs[index].off + (k * 2);
if (k > first_free) if (k > first_free)
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* keep track of non-relevant fields */ /* keep track of non-relevant fields */
mask_sel |= BIT(first_free - k); mask_sel |= BIT(first_free - k);
@ -4605,11 +4605,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
&prof->attr[prof->ptg_cnt]); &prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
} }
if (!found) if (!found)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
return 0; return 0;
} }
@ -4678,7 +4678,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */ /* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
if (!prof) { if (!prof) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_add_prof; goto err_ice_add_prof;
} }
@ -4721,7 +4721,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
*/ */
status = ice_add_prof_attrib(prof, ptg, ptype, status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt); attr, attr_cnt);
if (status == ICE_ERR_MAX_LIMIT) if (status == -ENOSPC)
break; break;
if (status) { if (status) {
/* This is simple a PTYPE/PTG with no /* This is simple a PTYPE/PTG with no
@ -4838,7 +4838,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
status = ice_rel_tcam_idx(hw, blk, status = ice_rel_tcam_idx(hw, blk,
prof->tcam[i].tcam_idx); prof->tcam[i].tcam_idx);
if (status) if (status)
return ICE_ERR_HW_TABLE; return -EIO;
} }
return 0; return 0;
@ -4885,7 +4885,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL); GFP_KERNEL);
if (!p) if (!p)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
p->type = ICE_VSIG_REM; p->type = ICE_VSIG_REM;
p->orig_vsig = vsig; p->orig_vsig = vsig;
@ -4932,7 +4932,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
return status; return status;
} }
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**
@ -4991,7 +4991,7 @@ int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
pmap = ice_search_prof_id(hw, blk, id); pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) { if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto err_ice_rem_prof; goto err_ice_rem_prof;
} }
@ -5031,7 +5031,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
/* Get the details on the profile specified by the handle ID */ /* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl); map = ice_search_prof_id(hw, blk, hdl);
if (!map) { if (!map) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto err_ice_get_prof; goto err_ice_get_prof;
} }
@ -5041,7 +5041,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL); GFP_KERNEL);
if (!p) { if (!p) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_get_prof; goto err_ice_get_prof;
} }
@ -5101,7 +5101,7 @@ err_ice_get_profs_vsig:
devm_kfree(ice_hw_to_dev(hw), ent1); devm_kfree(ice_hw_to_dev(hw), ent1);
} }
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
@ -5123,13 +5123,13 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
mutex_lock(&hw->blk[blk].es.prof_map_lock); mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl); map = ice_search_prof_id(hw, blk, hdl);
if (!map) { if (!map) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto err_ice_add_prof_to_lst; goto err_ice_add_prof_to_lst;
} }
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) { if (!p) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_add_prof_to_lst; goto err_ice_add_prof_to_lst;
} }
@ -5168,7 +5168,7 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) if (!p)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status) if (!status)
@ -5257,7 +5257,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* add TCAM to change list */ /* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) if (!p)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags, tcam->ptg, vsig, 0, tcam->attr.flags,
@ -5378,18 +5378,18 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* Error, if this VSIG already has this profile */ /* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS; return -EEXIST;
/* new VSIG profile structure */ /* new VSIG profile structure */
t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
if (!t) if (!t)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
mutex_lock(&hw->blk[blk].es.prof_map_lock); mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */ /* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl); map = ice_search_prof_id(hw, blk, hdl);
if (!map) { if (!map) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto err_ice_add_prof_id_vsig; goto err_ice_add_prof_id_vsig;
} }
@ -5404,7 +5404,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* add TCAM to change list */ /* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) { if (!p) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_add_prof_id_vsig; goto err_ice_add_prof_id_vsig;
} }
@ -5484,11 +5484,11 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) if (!p)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
new_vsig = ice_vsig_alloc(hw, blk); new_vsig = ice_vsig_alloc(hw, blk);
if (!new_vsig) { if (!new_vsig) {
status = ICE_ERR_HW_TABLE; status = -EIO;
goto err_ice_create_prof_id_vsig; goto err_ice_create_prof_id_vsig;
} }
@ -5535,7 +5535,7 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
vsig = ice_vsig_alloc(hw, blk); vsig = ice_vsig_alloc(hw, blk);
if (!vsig) if (!vsig)
return ICE_ERR_HW_TABLE; return -EIO;
status = ice_move_vsi(hw, blk, vsi, vsig, chg); status = ice_move_vsi(hw, blk, vsi, vsig, chg);
if (status) if (status)
@ -5629,7 +5629,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* scenario * scenario
*/ */
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
status = ICE_ERR_ALREADY_EXISTS; status = -EEXIST;
goto err_ice_add_prof_id_flow; goto err_ice_add_prof_id_flow;
} }
@ -5749,7 +5749,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
return 0; return 0;
} }
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**
@ -5864,7 +5864,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} }
} }
} else { } else {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
} }
/* update hardware tables */ /* update hardware tables */

View File

@ -634,12 +634,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */ /* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
return ICE_ERR_PARAM; return -EINVAL;
/* Multiple L4 headers */ /* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
return ICE_ERR_PARAM; return -EINVAL;
} }
return 0; return 0;
@ -1035,7 +1035,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF; prot_id = ICE_PROT_GRE_OF;
break; break;
default: default:
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
} }
/* Each extraction sequence entry is a word in size, and extracts a /* Each extraction sequence entry is a word in size, and extracts a
@ -1073,7 +1073,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability * does not exceed the block's capability
*/ */
if (params->es_cnt >= fv_words) if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
/* some blocks require a reversed field vector layout */ /* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse) if (hw->blk[params->blk].es.reverse)
@ -1112,12 +1112,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt > if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws)) ARRAY_SIZE(params->prof->segs[seg].raws))
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
/* Offsets within the segment headers are not supported */ /* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg); hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz) if (!hdrs_sz)
return ICE_ERR_PARAM; return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw; fv_words = hw->blk[params->blk].es.fvw;
@ -1150,7 +1150,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/ */
if (params->es_cnt >= hw->blk[params->blk].es.count || if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS) params->es_cnt >= ICE_MAX_FV_WORDS)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
/* some blocks require a reversed field vector layout */ /* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse) if (hw->blk[params->blk].es.reverse)
@ -1229,7 +1229,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0; status = 0;
break; break;
default: default:
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
} }
return status; return status;
@ -1334,7 +1334,7 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry) struct ice_flow_entry *entry)
{ {
if (!entry) if (!entry)
return ICE_ERR_BAD_PTR; return -EINVAL;
list_del(&entry->l_entry); list_del(&entry->l_entry);
@ -1366,16 +1366,16 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
u8 i; u8 i;
if (!prof) if (!prof)
return ICE_ERR_BAD_PTR; return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL); params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params) if (!params)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL); GFP_KERNEL);
if (!params->prof) { if (!params->prof) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto free_params; goto free_params;
} }
@ -1544,13 +1544,13 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
int status; int status;
if (segs_cnt > ICE_FLOW_SEG_MAX) if (segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
if (!segs_cnt) if (!segs_cnt)
return ICE_ERR_PARAM; return -EINVAL;
if (!segs) if (!segs)
return ICE_ERR_BAD_PTR; return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt); status = ice_flow_val_hdrs(segs, segs_cnt);
if (status) if (status)
@ -1584,7 +1584,7 @@ ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
prof = ice_flow_find_prof_id(hw, blk, prof_id); prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) { if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto out; goto out;
} }
@ -1619,23 +1619,23 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
/* No flow entry data is expected for RSS */ /* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS)) if (!entry_h || (!data && blk != ICE_BLK_RSS))
return ICE_ERR_BAD_PTR; return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]); mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id); prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) { if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
} else { } else {
/* Allocate memory for the entry being added and associate /* Allocate memory for the entry being added and associate
* the VSI to the found flow profile * the VSI to the found flow profile
*/ */
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e) if (!e)
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
else else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
} }
@ -1654,7 +1654,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS: case ICE_BLK_RSS:
break; break;
default: default:
status = ICE_ERR_NOT_IMPL; status = -EOPNOTSUPP;
goto out; goto out;
} }
@ -1688,7 +1688,7 @@ int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
int status = 0; int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
return ICE_ERR_PARAM; return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h); entry = ICE_FLOW_ENTRY_PTR(entry_h);
@ -1853,15 +1853,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM; return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val)) if (val && !is_power_of_2(val))
return ICE_ERR_CFG; return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val)) if (val && !is_power_of_2(val))
return ICE_ERR_CFG; return -EIO;
return 0; return 0;
} }
@ -1906,7 +1906,7 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
int status = 0; int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
if (list_empty(&hw->fl_profs[blk])) if (list_empty(&hw->fl_profs[blk]))
return 0; return 0;
@ -1981,7 +1981,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg), rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL); GFP_KERNEL);
if (!rss_cfg) if (!rss_cfg)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
@ -2032,11 +2032,11 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
int status; int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_PARAM; return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs) if (!segs)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Construct the packet segment info from the hashed fields */ /* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@ -2136,7 +2136,7 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
if (hashed_flds == ICE_HASH_INVALID || if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle)) !ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&hw->rss_locks); mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@ -2170,7 +2170,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs) if (!segs)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Construct the packet segment info from the hashed fields */ /* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@ -2182,7 +2182,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS); ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) { if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto out; goto out;
} }
@ -2224,7 +2224,7 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
if (hashed_flds == ICE_HASH_INVALID || if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle)) !ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&hw->rss_locks); mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@ -2287,12 +2287,12 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle)) !ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
/* Make sure no unsupported bits are specified */ /* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
return ICE_ERR_CFG; return -EIO;
hash_flds = avf_hash; hash_flds = avf_hash;
@ -2352,7 +2352,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
} }
if (rss_hash == ICE_HASH_INVALID) if (rss_hash == ICE_HASH_INVALID)
return ICE_ERR_OUT_OF_RANGE; return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE); ICE_FLOW_SEG_HDR_NONE);
@ -2374,7 +2374,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
struct ice_rss_cfg *r; struct ice_rss_cfg *r;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&hw->rss_locks); mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) { list_for_each_entry(r, &hw->rss_list_head, l_entry) {

View File

@ -275,7 +275,7 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
result = mac_action(vsi, &tmp_list); result = mac_action(vsi, &tmp_list);
@ -304,7 +304,7 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
result = mac_action(vsi, &tmp_list); result = mac_action(vsi, &tmp_list);
@ -328,7 +328,7 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action)) if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
return ICE_ERR_NO_MEMORY; return -ENOMEM;
result = vlan_action(vsi, &tmp_list); result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@ -352,7 +352,7 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
return ICE_ERR_NO_MEMORY; return -ENOMEM;
result = eth_action(vsi, &tmp_list); result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@ -471,7 +471,7 @@ ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
act &= ~flags_mask; act &= ~flags_mask;

View File

@ -670,7 +670,7 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_fwu_priv priv; struct ice_fwu_priv priv;
enum ice_status status; int status;
int err; int err;
switch (preservation) { switch (preservation) {

View File

@ -1758,7 +1758,7 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) { if (!status) {
vsi->num_vlan--; vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) { } else if (status == -ENOENT) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n", dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
vid, vsi->vsi_num, status); vid, vsi->vsi_num, status);
} else { } else {
@ -3036,7 +3036,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/ */
int ice_vsi_release(struct ice_vsi *vsi) int ice_vsi_release(struct ice_vsi *vsi)
{ {
enum ice_status err; int err;
struct ice_pf *pf; struct ice_pf *pf;
if (!vsi->back) if (!vsi->back)
@ -3775,39 +3775,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
u64_stats_update_end(&rx_ring->syncp); u64_stats_update_end(&rx_ring->syncp);
} }
/**
* ice_status_to_errno - convert from enum ice_status to Linux errno
* @err: ice_status value to convert
*/
int ice_status_to_errno(enum ice_status err)
{
switch (err) {
case ICE_SUCCESS:
return 0;
case ICE_ERR_DOES_NOT_EXIST:
return -ENOENT;
case ICE_ERR_OUT_OF_RANGE:
case ICE_ERR_AQ_ERROR:
case ICE_ERR_AQ_TIMEOUT:
case ICE_ERR_AQ_EMPTY:
case ICE_ERR_AQ_FW_CRITICAL:
return -EIO;
case ICE_ERR_PARAM:
case ICE_ERR_INVAL_SIZE:
return -EINVAL;
case ICE_ERR_NO_MEMORY:
return -ENOMEM;
case ICE_ERR_MAX_LIMIT:
return -EAGAIN;
case ICE_ERR_RESET_ONGOING:
return -EBUSY;
case ICE_ERR_AQ_FULL:
return -ENOSPC;
default:
return -EINVAL;
}
}
/** /**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free * @sw: switch to check if its default forwarding VSI is free
@ -4119,7 +4086,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other * a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE * than ICE_AQ_RC_EMODE
*/ */
if (status == ICE_ERR_AQ_ERROR) { if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status, (ena ? "ON" : "OFF"), status,

View File

@ -103,9 +103,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);

View File

@ -333,7 +333,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (status) { if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n"); netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */ /* if we failed because of alloc failures, just bail */
if (status == ICE_ERR_NO_MEMORY) { if (status == -ENOMEM) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
@ -346,7 +346,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* 'if' condition and report it as error. Instead continue processing * 'if' condition and report it as error. Instead continue processing
* rest of the function. * rest of the function.
*/ */
if (status && status != ICE_ERR_ALREADY_EXISTS) { if (status && status != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n"); netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI /* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some * should go into promiscuous mode. There should be some
@ -1424,7 +1424,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
u16 opcode; u16 opcode;
ret = ice_clean_rq_elem(hw, cq, &event, &pending); ret = ice_clean_rq_elem(hw, cq, &event, &pending);
if (ret == ICE_ERR_AQ_NO_WORK) if (ret == -EALREADY)
break; break;
if (ret) { if (ret) {
dev_err(dev, "%s Receive Queue event error %d\n", qtype, dev_err(dev, "%s Receive Queue event error %d\n", qtype,
@ -4218,7 +4218,7 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version * ice_send_version - update firmware with driver version
* @pf: PF struct * @pf: PF struct
* *
* Returns ICE_SUCCESS on success, else error code * Returns 0 on success, else error code
*/ */
static int ice_send_version(struct ice_pf *pf) static int ice_send_version(struct ice_pf *pf)
{ {
@ -5394,14 +5394,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Clean up old MAC filter. Not an error if old filter doesn't exist */ /* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) { if (status && status != -ENOENT) {
err = -EADDRNOTAVAIL; err = -EADDRNOTAVAIL;
goto err_update_filters; goto err_update_filters;
} }
/* Add filter for new MAC. If filter exists, return success */ /* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) if (status == -EEXIST)
/* Although this MAC filter is already present in hardware it's /* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was * possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back * modified outside of the driver and needs to be restored back

View File

@ -27,7 +27,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm; cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET) if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@ -74,7 +74,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
/* Verify the length of the read if this is for the Shadow RAM */ /* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM; return -EINVAL;
} }
do { do {
@ -131,7 +131,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */ /* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) if (offset & 0xFF000000)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@ -329,7 +329,7 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
if (!start) { if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module); module);
return ICE_ERR_PARAM; return -EINVAL;
} }
status = ice_acquire_nvm(hw, ICE_RES_READ); status = ice_acquire_nvm(hw, ICE_RES_READ);
@ -482,7 +482,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len; *module_tlv_len = tlv_len;
return 0; return 0;
} }
return ICE_ERR_INVAL_SIZE; return -EINVAL;
} }
/* Check next TLV, i.e. current TLV pointer + length + 2 words /* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length) * (for current TLV's type and length)
@ -490,7 +490,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2; next_tlv = next_tlv + tlv_len + 2;
} }
/* Module does not exist */ /* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**
@ -525,7 +525,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) { if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
return ICE_ERR_INVAL_SIZE; return -EINVAL;
} }
/* Subtract one to get PBA word count (PBA Size word is included in /* Subtract one to get PBA word count (PBA Size word is included in
@ -534,7 +534,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--; pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) { if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM; return -EINVAL;
} }
for (i = 0; i < pba_size; i++) { for (i = 0; i < pba_size; i++) {
@ -650,14 +650,14 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
if (sum) { if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum); sum);
return ICE_ERR_NVM; return -EIO;
} }
*civd = tmp; *civd = tmp;
return 0; return 0;
} }
return ICE_ERR_NVM; return -EIO;
} }
/** /**
@ -730,7 +730,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
return ICE_ERR_NVM; return -EIO;
} }
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@ -741,7 +741,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) { if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length); ICE_NETLIST_ID_BLK_SIZE, length);
return ICE_ERR_NVM; return -EIO;
} }
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@ -751,7 +751,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk) if (!id_blk)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */ /* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@ -819,7 +819,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
u8 data; u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false); status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR && if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset); __func__, offset);
@ -933,7 +933,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */ /* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return ICE_ERR_CFG; return -EIO;
} }
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@ -1021,7 +1021,7 @@ int ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */ /* Blank programming mode */
flash->blank_nvm_mode = true; flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE; return -EIO;
} }
status = ice_discover_flash_size(hw); status = ice_discover_flash_size(hw);
@ -1080,7 +1080,7 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status) if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
status = ICE_ERR_NVM_CHECKSUM; status = -EIO;
return status; return status;
} }
@ -1144,7 +1144,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc; struct ice_aq_desc desc;
if (length != 0 && !data) if (length != 0 && !data)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.pkg_data; cmd = &desc.params.pkg_data;
@ -1183,7 +1183,7 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
int status; int status;
if (!data || !comp_response || !comp_response_code) if (!data || !comp_response || !comp_response_code)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.pass_comp_tbl; cmd = &desc.params.pass_comp_tbl;

View File

@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_hw *hw; struct ice_hw *hw;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
if (!root) if (!root)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* coverity[suspicious_sizeof] */ /* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL); sizeof(*root), GFP_KERNEL);
if (!root->children) { if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root); devm_kfree(ice_hw_to_dev(hw), root);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
memcpy(&root->info, info, sizeof(*info)); memcpy(&root->info, info, sizeof(*info));
@ -156,7 +156,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_hw *hw; struct ice_hw *hw;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!parent) { if (!parent) {
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid)); le32_to_cpu(info->parent_teid));
return ICE_ERR_PARAM; return -EINVAL;
} }
/* query the current node information from FW before adding it /* query the current node information from FW before adding it
@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node) if (!node)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
if (hw->max_children[layer]) { if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */ /* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw), node->children = devm_kcalloc(ice_hw_to_dev(hw),
@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
sizeof(*node), GFP_KERNEL); sizeof(*node), GFP_KERNEL);
if (!node->children) { if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node); devm_kfree(ice_hw_to_dev(hw), node);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
} }
@ -240,7 +240,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
buf_size = struct_size(buf, teid, num_nodes); buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes); buf->hdr.num_elems = cpu_to_le16(num_nodes);
@ -531,7 +531,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
buf_size = sizeof(*buf) * num_nodes; buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
for (i = 0; i < num_nodes; i++) for (i = 0; i < num_nodes; i++)
buf[i] = cpu_to_le32(node_teids[i]); buf[i] = cpu_to_le32(node_teids[i]);
@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx) if (!vsi_ctx)
return ICE_ERR_PARAM; return -EINVAL;
/* allocate LAN queue contexts */ /* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) { if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx), sizeof(*q_ctx),
GFP_KERNEL); GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc]) if (!vsi_ctx->lan_q_ctx[tc])
return ICE_ERR_NO_MEMORY; return -ENOMEM;
vsi_ctx->num_lan_q_entries[tc] = new_numqs; vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0; return 0;
} }
@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL); sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx) if (!q_ctx)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx)); prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx) if (!vsi_ctx)
return ICE_ERR_PARAM; return -EINVAL;
/* allocate RDMA queue contexts */ /* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) { if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx), sizeof(*q_ctx),
GFP_KERNEL); GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc]) if (!vsi_ctx->rdma_q_ctx[tc])
return ICE_ERR_NO_MEMORY; return -ENOMEM;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs; vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0; return 0;
} }
@ -629,7 +629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL); sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx) if (!q_ctx)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx)); prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
@ -731,14 +731,14 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
u16 num_profiles = 1; u16 num_profiles = 1;
if (rl_info->prof_id_ref != 0) if (rl_info->prof_id_ref != 0)
return ICE_ERR_IN_USE; return -EBUSY;
/* Safe to remove profile ID */ /* Safe to remove profile ID */
buf = &rl_info->profile; buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL); &num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles) if (status || num_profiles_removed != num_profiles)
return ICE_ERR_CFG; return -EIO;
/* Delete stale entry now */ /* Delete stale entry now */
list_del(&rl_info->list_entry); list_del(&rl_info->list_entry);
@ -891,7 +891,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
buf_size = struct_size(buf, generic, num_nodes); buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes); buf->hdr.num_elems = cpu_to_le16(num_nodes);
@ -918,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status); hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf); devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG; return -EIO;
} }
*num_nodes_added = num_nodes; *num_nodes_added = num_nodes;
@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
return 0; return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer) if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM; return -EINVAL;
/* max children per node per layer */ /* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
if ((parent->num_children + num_nodes) > max_child_nodes) { if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */ /* Fail if the parent is a TC node */
if (parent == tc_node) if (parent == tc_node)
return ICE_ERR_CFG; return -EIO;
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
} }
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (*num_nodes_added > num_nodes) { if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added); *num_nodes_added);
status = ICE_ERR_CFG; status = -EIO;
break; break;
} }
/* break if all the nodes are added successfully */ /* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes)) if (!status && (*num_nodes_added == num_nodes))
break; break;
/* break if the error is not max limit */ /* break if the error is not max limit */
if (status && status != ICE_ERR_MAX_LIMIT) if (status && status != -ENOSPC)
break; break;
/* Exceeded the max children */ /* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@ -1208,13 +1208,13 @@ int ice_sched_init_port(struct ice_port_info *pi)
u8 i, j; u8 i, j;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
/* Query the Default Topology from FW */ /* Query the Default Topology from FW */
buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Query default scheduling tree topology */ /* Query default scheduling tree topology */
status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
@ -1226,7 +1226,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
num_branches); num_branches);
status = ICE_ERR_PARAM; status = -EINVAL;
goto err_init_port; goto err_init_port;
} }
@ -1237,7 +1237,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
num_elems); num_elems);
status = ICE_ERR_PARAM; status = -EINVAL;
goto err_init_port; goto err_init_port;
} }
@ -1312,7 +1312,7 @@ int ice_sched_query_res_alloc(struct ice_hw *hw)
buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
if (status) if (status)
@ -1341,7 +1341,7 @@ int ice_sched_query_res_alloc(struct ice_hw *hw)
sizeof(*hw->layer_info)), sizeof(*hw->layer_info)),
GFP_KERNEL); GFP_KERNEL);
if (!hw->layer_info) { if (!hw->layer_info) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto sched_query_out; goto sched_query_out;
} }
@ -1631,14 +1631,14 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) { for (i = vsil + 1; i <= qgl; i++) {
if (!parent) if (!parent)
return ICE_ERR_CFG; return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i], num_nodes[i],
&first_node_teid, &first_node_teid,
&num_added); &num_added);
if (status || num_nodes[i] != num_added) if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG; return -EIO;
/* The newly added node can be a new parent for the next /* The newly added node can be a new parent for the next
* layer nodes * layer nodes
@ -1728,7 +1728,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 i, vsil; u8 i, vsil;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw); vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
&first_node_teid, &first_node_teid,
&num_added); &num_added);
if (status || num_nodes[i] != num_added) if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG; return -EIO;
/* The newly added node can be a new parent for the next /* The newly added node can be a new parent for the next
* layer nodes * layer nodes
@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = parent->children[0]; parent = parent->children[0];
if (!parent) if (!parent)
return ICE_ERR_CFG; return -EIO;
if (i == vsil) if (i == vsil)
parent->vsi_handle = vsi_handle; parent->vsi_handle = vsi_handle;
@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_PARAM; return -EINVAL;
/* calculate number of supported nodes needed for this VSI */ /* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
@ -1808,15 +1808,15 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_CFG; return -EIO;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node) if (!vsi_node)
return ICE_ERR_CFG; return -EIO;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx) if (!vsi_ctx)
return ICE_ERR_PARAM; return -EINVAL;
if (owner == ICE_SCHED_NODE_OWNER_LAN) if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc]; prev_numqs = vsi_ctx->sched.max_lanq[tc];
@ -1881,10 +1881,10 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_PARAM; return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx) if (!vsi_ctx)
return ICE_ERR_PARAM; return -EINVAL;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */ /* suspend the VSI if TC is not enabled */
@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node) if (!vsi_node)
return ICE_ERR_CFG; return -EIO;
vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true; vsi_node->in_use = true;
@ -1996,7 +1996,7 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
static int static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{ {
int status = ICE_ERR_PARAM; int status = -EINVAL;
struct ice_vsi_ctx *vsi_ctx; struct ice_vsi_ctx *vsi_ctx;
u8 i; u8 i;
@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (ice_sched_is_leaf_node_present(vsi_node)) { if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE; status = -EBUSY;
goto exit_sched_rm_vsi_cfg; goto exit_sched_rm_vsi_cfg;
} }
while (j < vsi_node->num_children) { while (j < vsi_node->num_children) {
@ -2202,22 +2202,22 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
hw = pi->hw; hw = pi->hw;
if (!parent || !num_items) if (!parent || !num_items)
return ICE_ERR_PARAM; return -EINVAL;
/* Does parent have enough space */ /* Does parent have enough space */
if (parent->num_children + num_items > if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer]) hw->max_children[parent->tx_sched_layer])
return ICE_ERR_AQ_FULL; return -ENOSPC;
buf_len = struct_size(buf, teid, 1); buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL); buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
for (i = 0; i < num_items; i++) { for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]); node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) { if (!node) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto move_err_exit; goto move_err_exit;
} }
@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL); &grps_movd, NULL);
if (status && grps_movd != 1) { if (status && grps_movd != 1) {
status = ICE_ERR_CFG; status = -EIO;
goto move_err_exit; goto move_err_exit;
} }
@ -2264,15 +2264,15 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_CFG; return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node) if (!agg_node)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node) if (!vsi_node)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Is this VSI already part of given aggregator? */ /* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
&first_node_teid, &first_node_teid,
&num_nodes_added); &num_nodes_added);
if (status || num_nodes[i] != num_nodes_added) if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG; return -EIO;
/* The newly added node can be a new parent for the next /* The newly added node can be a new parent for the next
* layer nodes * layer nodes
@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
parent = parent->children[0]; parent = parent->children[0];
if (!parent) if (!parent)
return ICE_ERR_CFG; return -EIO;
} }
move_nodes: move_nodes:
@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_CFG; return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node) if (!agg_node)
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
/* Can't remove the aggregator node if it has children */ /* Can't remove the aggregator node if it has children */
if (ice_sched_is_agg_inuse(pi, agg_node)) if (ice_sched_is_agg_inuse(pi, agg_node))
return ICE_ERR_IN_USE; return -EBUSY;
/* need to remove the whole subtree if aggregator node is the /* need to remove the whole subtree if aggregator node is the
* only child. * only child.
@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
struct ice_sched_node *parent = agg_node->parent; struct ice_sched_node *parent = agg_node->parent;
if (!parent) if (!parent)
return ICE_ERR_CFG; return -EIO;
if (parent->num_children > 1) if (parent->num_children > 1)
break; break;
@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(pi->hw, agg_id); agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info) if (!agg_info)
return ICE_ERR_PARAM; return -EINVAL;
bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS); ICE_MAX_TRAFFIC_CLASS);
return 0; return 0;
@ -2514,7 +2514,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc); tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node) if (!tc_node)
return ICE_ERR_CFG; return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
/* Does Agg node already exist ? */ /* Does Agg node already exist ? */
@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
parent = tc_node; parent = tc_node;
for (i = hw->sw_entry_point_layer; i <= aggl; i++) { for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
if (!parent) if (!parent)
return ICE_ERR_CFG; return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i], num_nodes[i],
&first_node_teid, &first_node_teid,
&num_nodes_added); &num_nodes_added);
if (status || num_nodes[i] != num_nodes_added) if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG; return -EIO;
/* The newly added node can be a new parent for the next /* The newly added node can be a new parent for the next
* layer nodes * layer nodes
@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
GFP_KERNEL); GFP_KERNEL);
if (!agg_info) if (!agg_info)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
agg_info->agg_id = agg_id; agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type; agg_info->agg_type = agg_type;
@ -2733,11 +2733,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
agg_info = ice_get_agg_info(pi->hw, agg_id); agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info) if (!agg_info)
return ICE_ERR_PARAM; return -EINVAL;
/* check if entry already exist */ /* check if entry already exist */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info) if (!agg_vsi_info)
return ICE_ERR_PARAM; return -EINVAL;
bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS); ICE_MAX_TRAFFIC_CLASS);
return 0; return 0;
@ -2765,10 +2765,10 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u8 tc; u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle)) if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
agg_info = ice_get_agg_info(hw, agg_id); agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info) if (!agg_info)
return ICE_ERR_PARAM; return -EINVAL;
/* If the VSI is already part of another aggregator then update /* If the VSI is already part of another aggregator then update
* its VSI info list * its VSI info list
*/ */
@ -2790,7 +2790,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*agg_vsi_info), GFP_KERNEL); sizeof(*agg_vsi_info), GFP_KERNEL);
if (!agg_vsi_info) if (!agg_vsi_info)
return ICE_ERR_PARAM; return -EINVAL;
/* add VSI ID into the aggregator list */ /* add VSI ID into the aggregator list */
agg_vsi_info->vsi_handle = vsi_handle; agg_vsi_info->vsi_handle = vsi_handle;
@ -2874,7 +2874,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
&elem_cfgd, NULL); &elem_cfgd, NULL);
if (status || elem_cfgd != num_elems) { if (status || elem_cfgd != num_elems) {
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
return ICE_ERR_CFG; return -EIO;
} }
/* Config success case */ /* Config success case */
@ -2909,7 +2909,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
} else { } else {
return ICE_ERR_PARAM; return -EINVAL;
} }
/* Configure element */ /* Configure element */
@ -3102,7 +3102,7 @@ static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile) struct ice_aqc_rl_profile_elem *profile)
{ {
int status = ICE_ERR_PARAM; int status = -EINVAL;
s64 bytes_per_sec, ts_rate, mv_tmp; s64 bytes_per_sec, ts_rate, mv_tmp;
bool found = false; bool found = false;
s32 encode = 0; s32 encode = 0;
@ -3150,7 +3150,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_encode = cpu_to_le16(encode); profile->rl_encode = cpu_to_le16(encode);
status = 0; status = 0;
} else { } else {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
} }
return status; return status;
@ -3268,7 +3268,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
* hence only one of them may be set for any given element * hence only one of them may be set for any given element
*/ */
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
return ICE_ERR_CFG; return -EIO;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
break; break;
@ -3291,7 +3291,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(le16_to_cpu(data->eir_bw.bw_profile_idx) != (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID)) ICE_SCHED_DFLT_RL_PROF_ID))
return ICE_ERR_CFG; return -EIO;
/* EIR BW is set to default, disable it */ /* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */ /* Okay to enable shared BW now */
@ -3300,7 +3300,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
break; break;
default: default:
/* Unknown rate limit type */ /* Unknown rate limit type */
return ICE_ERR_PARAM; return -EINVAL;
} }
/* Configure element */ /* Configure element */
@ -3428,7 +3428,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
int status = 0; int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return ICE_ERR_PARAM; return -EINVAL;
/* Check the existing list for RL profile */ /* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry) list_entry)
@ -3441,11 +3441,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */ /* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE) if (status && status != -EBUSY)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break; break;
} }
if (status == ICE_ERR_IN_USE) if (status == -EBUSY)
status = 0; status = 0;
return status; return status;
} }
@ -3488,7 +3488,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
/* Save existing RL prof ID for later clean up */ /* Save existing RL prof ID for later clean up */
old_id = ice_sched_get_node_rl_prof_id(node, rl_type); old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
@ -3567,7 +3567,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num) enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{ {
struct ice_aqc_rl_profile_info *rl_prof_info; struct ice_aqc_rl_profile_info *rl_prof_info;
int status = ICE_ERR_PARAM; int status = -EINVAL;
struct ice_hw *hw = pi->hw; struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id; u16 old_id, rl_prof_id;
@ -3619,20 +3619,20 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
u8 layer_num; u8 layer_num;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return -EINVAL;
hw = pi->hw; hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */ /* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi); ice_sched_rm_unused_rl_prof(pi);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer); node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers) if (layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM; return -EINVAL;
if (rl_type == ICE_SHARED_BW) { if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */ /* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num); cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node) if (!cfg_node)
return ICE_ERR_CFG; return -EIO;
} }
/* EIR BW and Shared BW profiles are mutually exclusive and /* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element * hence only one of them may be set for any given element
@ -3690,7 +3690,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
(node->parent && node->parent->num_children == 1))) (node->parent && node->parent->num_children == 1)))
return 0; return 0;
return ICE_ERR_CFG; return -EIO;
} }
/** /**
@ -3715,7 +3715,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
return 0; return 0;
} }
@ -3735,12 +3735,12 @@ static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw) u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{ {
int status = ICE_ERR_PARAM; int status = -EINVAL;
struct ice_sched_node *node; struct ice_sched_node *node;
struct ice_q_ctx *q_ctx; struct ice_q_ctx *q_ctx;
if (!ice_is_vsi_valid(pi->hw, vsi_handle)) if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
mutex_lock(&pi->sched_lock); mutex_lock(&pi->sched_lock);
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
if (!q_ctx) if (!q_ctx)
@ -3762,7 +3762,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer); node->tx_sched_layer);
if (sel_layer >= pi->hw->num_tx_sched_layers) { if (sel_layer >= pi->hw->num_tx_sched_layers) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto exit_q_bw_lmt; goto exit_q_bw_lmt;
} }
status = ice_sched_validate_srl_node(node, sel_layer); status = ice_sched_validate_srl_node(node, sel_layer);
@ -3885,7 +3885,7 @@ ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc, enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw) enum ice_rl_type rl_type, u32 bw)
{ {
int status = ICE_ERR_PARAM; int status = -EINVAL;
struct ice_sched_node *node; struct ice_sched_node *node;
if (!pi) if (!pi)
@ -3982,7 +3982,7 @@ int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
bytes > ICE_MAX_BURST_SIZE_ALLOWED) bytes > ICE_MAX_BURST_SIZE_ALLOWED)
return ICE_ERR_PARAM; return -EINVAL;
if (ice_round_to_num(bytes, 64) <= if (ice_round_to_num(bytes, 64) <=
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
/* 64 byte granularity case */ /* 64 byte granularity case */
@ -4049,7 +4049,7 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info) struct ice_bw_type_info *bw_t_info)
{ {
struct ice_port_info *pi = hw->port_info; struct ice_port_info *pi = hw->port_info;
int status = ICE_ERR_PARAM; int status = -EINVAL;
u16 bw_alloc; u16 bw_alloc;
if (!node) if (!node)
@ -4202,7 +4202,7 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle); agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info) if (!agg_info)
return 0; /* Not present in list - default Agg case */ return 0; /* Not present in list - default Agg case */
@ -4260,6 +4260,6 @@ ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
/* Following also checks the presence of node in tree */ /* Following also checks the presence of node in tree */
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
if (!q_node) if (!q_node)
return ICE_ERR_PARAM; return -EINVAL;
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
} }

View File

@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len) if (vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* increment the message count in the VF array */ /* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++; snap->mbx_vf.vf_cntr[vf_id]++;
@ -309,7 +309,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
int status = 0; int status = 0;
if (!is_malvf || !mbx_data) if (!is_malvf || !mbx_data)
return ICE_ERR_BAD_PTR; return -EINVAL;
/* When entering the mailbox state machine assume that the VF /* When entering the mailbox state machine assume that the VF
* is not malicious until detected. * is not malicious until detected.
@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* interrupt is not less than the defined AVF message threshold. * interrupt is not less than the defined AVF message threshold.
*/ */
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
return ICE_ERR_INVAL_SIZE; return -EINVAL;
/* The watermark value should not be lesser than the threshold limit /* The watermark value should not be lesser than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox * set for the number of asynchronous messages a VF can send to mailbox
@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
*/ */
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
return ICE_ERR_PARAM; return -EINVAL;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID; new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf; snap_buf = &snap->mbx_buf;
@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
default: default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID; new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
status = ICE_ERR_CFG; status = -EIO;
} }
snap_buf->state = new_state; snap_buf->state = new_state;
@ -410,15 +410,15 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf) u16 bitmap_len, u16 vf_id, bool *report_malvf)
{ {
if (!all_malvfs || !report_malvf) if (!all_malvfs || !report_malvf)
return ICE_ERR_PARAM; return -EINVAL;
*report_malvf = false; *report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE; return -EINVAL;
if (vf_id >= bitmap_len) if (vf_id >= bitmap_len)
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* If the vf_id is found in the bitmap set bit and boolean to true */ /* If the vf_id is found in the bitmap set bit and boolean to true */
if (!test_and_set_bit(vf_id, all_malvfs)) if (!test_and_set_bit(vf_id, all_malvfs))
@ -446,14 +446,14 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id) u16 bitmap_len, u16 vf_id)
{ {
if (!snap || !all_malvfs) if (!snap || !all_malvfs)
return ICE_ERR_PARAM; return -EINVAL;
if (bitmap_len < snap->mbx_vf.vfcntr_len) if (bitmap_len < snap->mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE; return -EINVAL;
/* Ensure VF ID value is not larger than bitmap or VF counter length */ /* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
clear_bit(vf_id, all_malvfs); clear_bit(vf_id, all_malvfs);
@ -491,13 +491,13 @@ int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
* the functional capabilities of the PF. * the functional capabilities of the PF.
*/ */
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
return ICE_ERR_INVAL_SIZE; return -EINVAL;
snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
sizeof(*snap->mbx_vf.vf_cntr), sizeof(*snap->mbx_vf.vf_cntr),
GFP_KERNEL); GFP_KERNEL);
if (!snap->mbx_vf.vf_cntr) if (!snap->mbx_vf.vf_cntr)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Setting the VF counter length to the number of allocated /* Setting the VF counter length to the number of allocated
* VFs for given PF's functional capabilities. * VFs for given PF's functional capabilities.

View File

@ -1,44 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_STATUS_H_
#define _ICE_STATUS_H_
/* Error Codes */
enum ice_status {
ICE_SUCCESS = 0,
/* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */

View File

@ -536,7 +536,7 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(*recps), GFP_KERNEL); sizeof(*recps), GFP_KERNEL);
if (!recps) if (!recps)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i; recps[i].root_rid = i;
@ -840,7 +840,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int status; int status;
if (vsi_handle >= ICE_MAX_VSI) if (vsi_handle >= ICE_MAX_VSI)
return ICE_ERR_PARAM; return -EINVAL;
status = ice_aq_add_vsi(hw, vsi_ctx, cd); status = ice_aq_add_vsi(hw, vsi_ctx, cd);
if (status) if (status)
return status; return status;
@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
sizeof(*tmp_vsi_ctx), GFP_KERNEL); sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) { if (!tmp_vsi_ctx) {
ice_aq_free_vsi(hw, vsi_ctx, false, cd); ice_aq_free_vsi(hw, vsi_ctx, false, cd);
return ICE_ERR_NO_MEMORY; return -ENOMEM;
} }
*tmp_vsi_ctx = *vsi_ctx; *tmp_vsi_ctx = *vsi_ctx;
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
@ -880,7 +880,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int status; int status;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
if (!status) if (!status)
@ -902,7 +902,7 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd) struct ice_sq_cd *cd)
{ {
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
return ice_aq_update_vsi(hw, vsi_ctx, cd); return ice_aq_update_vsi(hw, vsi_ctx, cd);
} }
@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
else else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); return ice_update_vsi(hw, vsi_handle, ctx, NULL);
} }
/** /**
@ -952,7 +952,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
buf_len = struct_size(sw_buf, elem, 1); buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf) if (!sw_buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1); sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC || if (lkup_type == ICE_SW_LKUP_MAC ||
@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type = sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else { } else {
status = ICE_ERR_PARAM; status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit; goto ice_aq_alloc_free_vsi_list_exit;
} }
@ -1008,7 +1008,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
if (opc != ice_aqc_opc_add_sw_rules && if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_update_sw_rules &&
opc != ice_aqc_opc_remove_sw_rules) opc != ice_aqc_opc_remove_sw_rules)
return ICE_ERR_PARAM; return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc); ice_fill_dflt_direct_cmd_desc(&desc, opc);
@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules && if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
return status; return status;
} }
@ -1080,7 +1080,7 @@ ice_aq_get_recipe(struct ice_hw *hw,
u16 buf_size; u16 buf_size;
if (*num_recipes != ICE_MAX_NUM_RECIPES) if (*num_recipes != ICE_MAX_NUM_RECIPES)
return ICE_ERR_PARAM; return -EINVAL;
cmd = &desc.params.add_get_recipe; cmd = &desc.params.add_get_recipe;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
@ -1163,7 +1163,7 @@ static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
buf_len = struct_size(sw_buf, elem, 1); buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL); sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf) if (!sw_buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1); sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
@ -1247,7 +1247,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* we need a buffer big enough to accommodate all the recipes */ /* we need a buffer big enough to accommodate all the recipes */
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp) if (!tmp)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
tmp[0].recipe_indx = rid; tmp[0].recipe_indx = rid;
status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
GFP_KERNEL); GFP_KERNEL);
if (!rg_entry) { if (!rg_entry) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll; goto err_unroll;
} }
@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
GFP_KERNEL); GFP_KERNEL);
if (!recps[rid].root_buf) { if (!recps[rid].root_buf) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll; goto err_unroll;
} }
@ -1419,7 +1419,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
GFP_KERNEL); GFP_KERNEL);
if (!rbuf) if (!rbuf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Multiple calls to ice_aq_get_sw_cfg may be required /* Multiple calls to ice_aq_get_sw_cfg may be required
* to get all the switch configuration information. The need * to get all the switch configuration information. The need
@ -1688,7 +1688,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 id; u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM; return -EINVAL;
/* Create two back-to-back switch rules and submit them to the HW using /* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer: * one memory buffer:
@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act) if (!lg_act)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
@ -1820,7 +1820,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
int i; int i;
if (!num_vsi) if (!num_vsi)
return ICE_ERR_PARAM; return -EINVAL;
if (lkup_type == ICE_SW_LKUP_MAC || if (lkup_type == ICE_SW_LKUP_MAC ||
lkup_type == ICE_SW_LKUP_MAC_VLAN || lkup_type == ICE_SW_LKUP_MAC_VLAN ||
@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else else
return ICE_ERR_PARAM; return -EINVAL;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
for (i = 0; i < num_vsi; i++) { for (i = 0; i < num_vsi; i++) {
if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto exit; goto exit;
} }
/* AQ call requires hw_vsi_id(s) */ /* AQ call requires hw_vsi_id(s) */
@ -1908,11 +1908,11 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
GFP_KERNEL); GFP_KERNEL);
if (!fm_entry) { if (!fm_entry) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto ice_create_pkt_fwd_rule_exit; goto ice_create_pkt_fwd_rule_exit;
} }
@ -1968,7 +1968,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
@ -2055,13 +2055,13 @@ ice_add_update_vsi_list(struct ice_hw *hw,
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
if ((new_fltr->fltr_act == ICE_FWD_TO_Q || if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
new_fltr->fltr_act == ICE_FWD_TO_QGRP) && new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->fltr_act == ICE_FWD_TO_VSI || (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already /* Only one entry existed in the mapping and it was not already
@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
return ICE_ERR_ALREADY_EXISTS; return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[0] = cur_fltr->vsi_handle;
vsi_handle_arr[1] = new_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle;
@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id); vsi_list_id);
if (!m_entry->vsi_list_info) if (!m_entry->vsi_list_info)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* If this entry was large action then the large action needs /* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list * to be updated to point to FWD to VSI list
@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
enum ice_adminq_opc opcode; enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info) if (!m_entry->vsi_list_info)
return ICE_ERR_CFG; return -EIO;
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@ -2220,7 +2220,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
int status = 0; int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id = f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@ -2266,7 +2266,7 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
@ -2298,11 +2298,11 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0) fm_list->vsi_count == 0)
return ICE_ERR_PARAM; return -EINVAL;
/* A rule with the VSI being removed does not exist */ /* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
lkup_type = fm_list->fltr_info.lkup_type; lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI); ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle)) if (!ice_is_vsi_valid(hw, rem_vsi_handle))
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* Make sure VSI list is empty before removing it below */ /* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@ -2387,7 +2387,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
u16 vsi_handle; u16 vsi_handle;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id = f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
mutex_lock(rule_lock); mutex_lock(rule_lock);
list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
if (!list_elem) { if (!list_elem) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto exit; goto exit;
} }
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true; remove_rule = true;
} else if (!list_elem->vsi_list_info) { } else if (!list_elem->vsi_list_info) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto exit; goto exit;
} else if (list_elem->vsi_list_info->ref_cnt > 1) { } else if (list_elem->vsi_list_info->ref_cnt > 1) {
/* a ref_cnt > 1 indicates that the vsi_list is being /* a ref_cnt > 1 indicates that the vsi_list is being
@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE, ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
GFP_KERNEL); GFP_KERNEL);
if (!s_rule) { if (!s_rule) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto exit; goto exit;
} }
@ -2603,7 +2603,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
u8 elem_sent; u8 elem_sent;
if (!m_list || !hw) if (!m_list || !hw)
return ICE_ERR_PARAM; return -EINVAL;
s_rule = NULL; s_rule = NULL;
sw = hw->switch_info; sw = hw->switch_info;
@ -2616,23 +2616,23 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
m_list_itr->fltr_info.flag = ICE_FLTR_TX; m_list_itr->fltr_info.flag = ICE_FLTR_TX;
vsi_handle = m_list_itr->fltr_info.vsi_handle; vsi_handle = m_list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */ /* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM; return -EINVAL;
m_list_itr->fltr_info.src = hw_vsi_id; m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add)) is_zero_ether_addr(add))
return ICE_ERR_PARAM; return -EINVAL;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) { if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */ /* Don't overwrite the unicast address */
mutex_lock(rule_lock); mutex_lock(rule_lock);
if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
&m_list_itr->fltr_info)) { &m_list_itr->fltr_info)) {
mutex_unlock(rule_lock); mutex_unlock(rule_lock);
return ICE_ERR_ALREADY_EXISTS; return -EEXIST;
} }
mutex_unlock(rule_lock); mutex_unlock(rule_lock);
num_unicast++; num_unicast++;
@ -2660,7 +2660,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL); GFP_KERNEL);
if (!s_rule) { if (!s_rule) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto ice_add_mac_exit; goto ice_add_mac_exit;
} }
@ -2710,7 +2710,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fm_entry), GFP_KERNEL); sizeof(*fm_entry), GFP_KERNEL);
if (!fm_entry) { if (!fm_entry) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto ice_add_mac_exit; goto ice_add_mac_exit;
} }
fm_entry->fltr_info = *f_info; fm_entry->fltr_info = *f_info;
@ -2749,7 +2749,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
int status = 0; int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id = f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* VLAN ID should only be 12 bits */ /* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM; return -EINVAL;
if (new_fltr->src_id != ICE_SRC_ID_VSI) if (new_fltr->src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM; return -EINVAL;
new_fltr->src = new_fltr->fwd_id.hw_vsi_id; new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
lkup_type = new_fltr->lkup_type; lkup_type = new_fltr->lkup_type;
@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr); new_fltr);
if (!v_list_itr) { if (!v_list_itr) {
status = ICE_ERR_DOES_NOT_EXIST; status = -ENOENT;
goto exit; goto exit;
} }
/* reuse VSI list for new rule and increment ref_cnt */ /* reuse VSI list for new rule and increment ref_cnt */
@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
if (v_list_itr->vsi_count > 1 && if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) { v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
status = ICE_ERR_CFG; status = -EIO;
goto exit; goto exit;
} }
@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (cur_handle == vsi_handle) { if (cur_handle == vsi_handle) {
status = ICE_ERR_ALREADY_EXISTS; status = -EEXIST;
goto exit; goto exit;
} }
@ -2895,11 +2895,11 @@ int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
struct ice_fltr_list_entry *v_list_itr; struct ice_fltr_list_entry *v_list_itr;
if (!v_list || !hw) if (!v_list || !hw)
return ICE_ERR_PARAM; return -EINVAL;
list_for_each_entry(v_list_itr, v_list, list_entry) { list_for_each_entry(v_list_itr, v_list, list_entry) {
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
return ICE_ERR_PARAM; return -EINVAL;
v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->fltr_info.flag = ICE_FLTR_TX;
v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
if (v_list_itr->status) if (v_list_itr->status)
@ -2923,7 +2923,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
struct ice_fltr_list_entry *em_list_itr; struct ice_fltr_list_entry *em_list_itr;
if (!em_list || !hw) if (!em_list || !hw)
return ICE_ERR_PARAM; return -EINVAL;
list_for_each_entry(em_list_itr, em_list, list_entry) { list_for_each_entry(em_list_itr, em_list, list_entry) {
enum ice_sw_lkup_type l_type = enum ice_sw_lkup_type l_type =
@ -2931,7 +2931,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE) l_type != ICE_SW_LKUP_ETHERTYPE)
return ICE_ERR_PARAM; return -EINVAL;
em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr); em_list_itr);
@ -2952,7 +2952,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
struct ice_fltr_list_entry *em_list_itr, *tmp; struct ice_fltr_list_entry *em_list_itr, *tmp;
if (!em_list || !hw) if (!em_list || !hw)
return ICE_ERR_PARAM; return -EINVAL;
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
enum ice_sw_lkup_type l_type = enum ice_sw_lkup_type l_type =
@ -2960,7 +2960,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE) l_type != ICE_SW_LKUP_ETHERTYPE)
return ICE_ERR_PARAM; return -EINVAL;
em_list_itr->status = ice_remove_rule_internal(hw, l_type, em_list_itr->status = ice_remove_rule_internal(hw, l_type,
em_list_itr); em_list_itr);
@ -3031,7 +3031,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
u16 hw_vsi_id; u16 hw_vsi_id;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
@ -3039,7 +3039,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
memset(&f_info, 0, sizeof(f_info)); memset(&f_info, 0, sizeof(f_info));
@ -3137,7 +3137,7 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* This function removes either a MAC filter rule or a specific VSI from a * This function removes either a MAC filter rule or a specific VSI from a
* VSI list for a multicast MAC address. * VSI list for a multicast MAC address.
* *
* Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by * Returns -ENOENT if a given entry was not added by
* ice_add_mac. Caller should be aware that this call will only work if all * ice_add_mac. Caller should be aware that this call will only work if all
* the entries passed into m_list were added previously. It will not attempt to * the entries passed into m_list were added previously. It will not attempt to
* do a partial remove of entries that were found. * do a partial remove of entries that were found.
@ -3148,7 +3148,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
struct mutex *rule_lock; /* Lock to protect filter rule list */ struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list) if (!m_list)
return ICE_ERR_PARAM; return -EINVAL;
rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
@ -3157,11 +3157,11 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
u16 vsi_handle; u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC) if (l_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM; return -EINVAL;
vsi_handle = list_itr->fltr_info.vsi_handle; vsi_handle = list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
list_itr->fltr_info.fwd_id.hw_vsi_id = list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle); ice_get_hw_vsi_num(hw, vsi_handle);
@ -3174,7 +3174,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
&list_itr->fltr_info)) { &list_itr->fltr_info)) {
mutex_unlock(rule_lock); mutex_unlock(rule_lock);
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
mutex_unlock(rule_lock); mutex_unlock(rule_lock);
} }
@ -3198,13 +3198,13 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
struct ice_fltr_list_entry *v_list_itr, *tmp; struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw) if (!v_list || !hw)
return ICE_ERR_PARAM; return -EINVAL;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN) if (l_type != ICE_SW_LKUP_VLAN)
return ICE_ERR_PARAM; return -EINVAL;
v_list_itr->status = ice_remove_rule_internal(hw, v_list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_VLAN, ICE_SW_LKUP_VLAN,
v_list_itr); v_list_itr);
@ -3254,7 +3254,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
*/ */
tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
if (!tmp) if (!tmp)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
tmp->fltr_info = *fi; tmp->fltr_info = *fi;
@ -3295,7 +3295,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
/* check to make sure VSI ID is valid and within boundary */ /* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) { list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
@ -3385,7 +3385,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u8 recipe_id; u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN; recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@ -3457,7 +3457,7 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
u8 recipe_id; u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr)); memset(&new_fltr, 0, sizeof(new_fltr));
@ -3693,7 +3693,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
buf_len = struct_size(buf, elem, 1); buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL); buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items); buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@ -3731,7 +3731,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
buf_len = struct_size(buf, elem, 1); buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL); buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) if (!buf)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items); buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@ -3965,7 +3965,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
sizeof(*entry), sizeof(*entry),
GFP_KERNEL); GFP_KERNEL);
if (!entry) if (!entry)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
list_add(&entry->l_entry, rg_list); list_add(&entry->l_entry, rg_list);
grp = &entry->r_group; grp = &entry->r_group;
(*recp_cnt)++; (*recp_cnt)++;
@ -4033,7 +4033,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
* invalid pair * invalid pair
*/ */
if (!found) if (!found)
return ICE_ERR_PARAM; return -EINVAL;
} }
} }
@ -4143,22 +4143,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (rm->n_grp_count > 1) { if (rm->n_grp_count > 1) {
if (rm->n_grp_count > free_res_idx) if (rm->n_grp_count > free_res_idx)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
rm->n_grp_count++; rm->n_grp_count++;
} }
if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp) if (!tmp)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
GFP_KERNEL); GFP_KERNEL);
if (!buf) { if (!buf) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_mem; goto err_mem;
} }
@ -4218,7 +4218,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
*/ */
if (chain_idx >= ICE_MAX_FV_WORDS) { if (chain_idx >= ICE_MAX_FV_WORDS) {
ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
status = ICE_ERR_MAX_LIMIT; status = -ENOSPC;
goto err_unroll; goto err_unroll;
} }
@ -4249,7 +4249,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[0].recipe_bitmap, rm->r_bitmap, memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
sizeof(buf[0].recipe_bitmap)); sizeof(buf[0].recipe_bitmap));
} else { } else {
status = ICE_ERR_BAD_PTR; status = -EINVAL;
goto err_unroll; goto err_unroll;
} }
/* Applicable only for ROOT_RECIPE, set the fwd_priority for /* Applicable only for ROOT_RECIPE, set the fwd_priority for
@ -4285,7 +4285,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
sizeof(*last_chain_entry), sizeof(*last_chain_entry),
GFP_KERNEL); GFP_KERNEL);
if (!last_chain_entry) { if (!last_chain_entry) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_unroll; goto err_unroll;
} }
last_chain_entry->rid = rid; last_chain_entry->rid = rid;
@ -4320,7 +4320,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
sizeof(buf[recps].recipe_bitmap)); sizeof(buf[recps].recipe_bitmap));
} else { } else {
status = ICE_ERR_BAD_PTR; status = -EINVAL;
goto err_unroll; goto err_unroll;
} }
buf[recps].content.act_ctrl_fwd_priority = rm->priority; buf[recps].content.act_ctrl_fwd_priority = rm->priority;
@ -4354,7 +4354,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
} }
if (!idx_found) { if (!idx_found) {
status = ICE_ERR_OUT_OF_RANGE; status = -EIO;
goto err_unroll; goto err_unroll;
} }
@ -4452,11 +4452,11 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
if (!prot_ids) if (!prot_ids)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
for (i = 0; i < lkups_cnt; i++) for (i = 0; i < lkups_cnt; i++)
if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
status = ICE_ERR_CFG; status = -EIO;
goto free_mem; goto free_mem;
} }
@ -4493,7 +4493,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* @rinfo: other information regarding the rule e.g. priority and action info * @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure * @lkup_exts: lookup word structure
*/ */
static enum ice_status static int
ice_add_special_words(struct ice_adv_rule_info *rinfo, ice_add_special_words(struct ice_adv_rule_info *rinfo,
struct ice_prot_lkup_ext *lkup_exts) struct ice_prot_lkup_ext *lkup_exts)
{ {
@ -4510,7 +4510,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
lkup_exts->field_mask[word] = mask; lkup_exts->field_mask[word] = mask;
} else { } else {
return ICE_ERR_MAX_LIMIT; return -ENOSPC;
} }
} }
@ -4577,11 +4577,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u8 i; u8 i;
if (!lkups_cnt) if (!lkups_cnt)
return ICE_ERR_PARAM; return -EINVAL;
lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
if (!lkup_exts) if (!lkup_exts)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
/* Determine the number of words to be matched and if it exceeds a /* Determine the number of words to be matched and if it exceeds a
* recipe's restrictions * recipe's restrictions
@ -4590,20 +4590,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count; u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) { if (lkups[i].type >= ICE_PROTOCOL_LAST) {
status = ICE_ERR_CFG; status = -EIO;
goto err_free_lkup_exts; goto err_free_lkup_exts;
} }
count = ice_fill_valid_words(&lkups[i], lkup_exts); count = ice_fill_valid_words(&lkups[i], lkup_exts);
if (!count) { if (!count) {
status = ICE_ERR_CFG; status = -EIO;
goto err_free_lkup_exts; goto err_free_lkup_exts;
} }
} }
rm = kzalloc(sizeof(*rm), GFP_KERNEL); rm = kzalloc(sizeof(*rm), GFP_KERNEL);
if (!rm) { if (!rm) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_free_lkup_exts; goto err_free_lkup_exts;
} }
@ -4883,7 +4883,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
} }
/* this should never happen in a correct calling sequence */ /* this should never happen in a correct calling sequence */
if (!found) if (!found)
return ICE_ERR_PARAM; return -EINVAL;
switch (lkups[i].type) { switch (lkups[i].type) {
case ICE_MAC_OFOS: case ICE_MAC_OFOS:
@ -4920,12 +4920,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_udp_tnl_hdr); len = sizeof(struct ice_udp_tnl_hdr);
break; break;
default: default:
return ICE_ERR_PARAM; return -EINVAL;
} }
/* the length should be a word multiple */ /* the length should be a word multiple */
if (len % ICE_BYTES_PER_WORD) if (len % ICE_BYTES_PER_WORD)
return ICE_ERR_CFG; return -EIO;
/* We have the offset to the header start, the length, the /* We have the offset to the header start, the length, the
* caller's header values and mask. Use this information to * caller's header values and mask. Use this information to
@ -4955,7 +4955,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt: dummy packet to fill in * @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet * @offsets: offset info for the dummy packet
*/ */
static enum ice_status static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{ {
@ -4964,11 +4964,11 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) { switch (tun_type) {
case ICE_SW_TUN_VXLAN: case ICE_SW_TUN_VXLAN:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
return ICE_ERR_CFG; return -EIO;
break; break;
case ICE_SW_TUN_GENEVE: case ICE_SW_TUN_GENEVE:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
return ICE_ERR_CFG; return -EIO;
break; break;
default: default:
/* Nothing needs to be done for this tunnel type */ /* Nothing needs to be done for this tunnel type */
@ -4989,7 +4989,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
} }
} }
return ICE_ERR_CFG; return -EIO;
} }
/** /**
@ -5066,13 +5066,13 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
return ICE_ERR_NOT_IMPL; return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already /* Only one entry existed in the mapping and it was not already
@ -5085,7 +5085,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (cur_fltr->sw_act.fwd_id.hw_vsi_id == if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
new_fltr->sw_act.fwd_id.hw_vsi_id) new_fltr->sw_act.fwd_id.hw_vsi_id)
return ICE_ERR_ALREADY_EXISTS; return -EEXIST;
vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
@ -5118,7 +5118,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->sw_act.vsi_handle; u16 vsi_handle = new_fltr->sw_act.vsi_handle;
if (!m_entry->vsi_list_info) if (!m_entry->vsi_list_info)
return ICE_ERR_CFG; return -EIO;
/* A rule already exists with the new VSI being added */ /* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@ -5184,7 +5184,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
} }
if (!lkups_cnt) if (!lkups_cnt)
return ICE_ERR_PARAM; return -EINVAL;
/* get # of words we need to match */ /* get # of words we need to match */
word_cnt = 0; word_cnt = 0;
@ -5198,13 +5198,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
} }
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
return ICE_ERR_PARAM; return -EINVAL;
/* make sure that we can locate a dummy packet */ /* make sure that we can locate a dummy packet */
ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
&pkt_offsets); &pkt_offsets);
if (!pkt) { if (!pkt) {
status = ICE_ERR_PARAM; status = -EINVAL;
goto err_ice_add_adv_rule; goto err_ice_add_adv_rule;
} }
@ -5212,11 +5212,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
return ICE_ERR_CFG; return -EIO;
vsi_handle = rinfo->sw_act.vsi_handle; vsi_handle = rinfo->sw_act.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle)) if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM; return -EINVAL;
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id = rinfo->sw_act.fwd_id.hw_vsi_id =
@ -5250,7 +5250,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
if (!rinfo->flags_info.act_valid) { if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE; act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE; act |= ICE_SINGLE_ACT_LB_ENABLE;
@ -5284,7 +5284,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_SINGLE_ACT_VALID_BIT; ICE_SINGLE_ACT_VALID_BIT;
break; break;
default: default:
status = ICE_ERR_CFG; status = -EIO;
goto err_ice_add_adv_rule; goto err_ice_add_adv_rule;
} }
@ -5329,14 +5329,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
sizeof(struct ice_adv_fltr_mgmt_list_entry), sizeof(struct ice_adv_fltr_mgmt_list_entry),
GFP_KERNEL); GFP_KERNEL);
if (!adv_fltr) { if (!adv_fltr) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_add_adv_rule; goto err_ice_add_adv_rule;
} }
adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
lkups_cnt * sizeof(*lkups), GFP_KERNEL); lkups_cnt * sizeof(*lkups), GFP_KERNEL);
if (!adv_fltr->lkups) { if (!adv_fltr->lkups) {
status = ICE_ERR_NO_MEMORY; status = -ENOMEM;
goto err_ice_add_adv_rule; goto err_ice_add_adv_rule;
} }
@ -5444,11 +5444,11 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0) fm_list->vsi_count == 0)
return ICE_ERR_PARAM; return -EINVAL;
/* A rule with the VSI being removed does not exist */ /* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
lkup_type = ICE_SW_LKUP_LAST; lkup_type = ICE_SW_LKUP_LAST;
vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
@ -5468,7 +5468,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI); ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle)) if (!ice_is_vsi_valid(hw, rem_vsi_handle))
return ICE_ERR_OUT_OF_RANGE; return -EIO;
/* Make sure VSI list is empty before removing it below */ /* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@ -5548,11 +5548,11 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count; u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) if (lkups[i].type >= ICE_PROTOCOL_LAST)
return ICE_ERR_CFG; return -EIO;
count = ice_fill_valid_words(&lkups[i], &lkup_exts); count = ice_fill_valid_words(&lkups[i], &lkup_exts);
if (!count) if (!count)
return ICE_ERR_CFG; return -EIO;
} }
/* Create any special protocol/offset pairs, such as looking at tunnel /* Create any special protocol/offset pairs, such as looking at tunnel
@ -5565,7 +5565,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If did not find a recipe that match the existing criteria */ /* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES) if (rid == ICE_MAX_NUM_RECIPES)
return ICE_ERR_PARAM; return -EINVAL;
rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
@ -5597,7 +5597,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule) if (!s_rule)
return ICE_ERR_NO_MEMORY; return -ENOMEM;
s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.act = 0;
s_rule->pdata.lkup_tx_rx.index = s_rule->pdata.lkup_tx_rx.index =
cpu_to_le16(list_elem->rule_info.fltr_rule_id); cpu_to_le16(list_elem->rule_info.fltr_rule_id);
@ -5605,7 +5605,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1, rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL); ice_aqc_opc_remove_sw_rules, NULL);
if (!status || status == ICE_ERR_DOES_NOT_EXIST) { if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info; struct ice_switch_info *sw = hw->switch_info;
mutex_lock(rule_lock); mutex_lock(rule_lock);
@ -5641,7 +5641,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
sw = hw->switch_info; sw = hw->switch_info;
if (!sw->recp_list[remove_entry->rid].recp_created) if (!sw->recp_list[remove_entry->rid].recp_created)
return ICE_ERR_PARAM; return -EINVAL;
list_head = &sw->recp_list[remove_entry->rid].filt_rules; list_head = &sw->recp_list[remove_entry->rid].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) { list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->rule_info.fltr_rule_id == if (list_itr->rule_info.fltr_rule_id ==
@ -5653,7 +5653,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
} }
} }
/* either list is empty or unable to find rule */ /* either list is empty or unable to find rule */
return ICE_ERR_DOES_NOT_EXIST; return -ENOENT;
} }
/** /**

View File

@ -450,7 +450,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.fltr_rule_id = fltr->cookie; rule_info.fltr_rule_id = fltr->cookie;
status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (status == ICE_ERR_ALREADY_EXISTS) { if (status == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL; ret = -EINVAL;
goto exit; goto exit;
@ -1162,7 +1162,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.vsi_handle = fltr->dest_id; rule_rem.vsi_handle = fltr->dest_id;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) { if (err) {
if (err == ICE_ERR_DOES_NOT_EXIST) { if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT; return -ENOENT;
} }

View File

@ -7,7 +7,6 @@
#define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4 #define ICE_BYTES_PER_DWORD 4
#include "ice_status.h"
#include "ice_hw_autogen.h" #include "ice_hw_autogen.h"
#include "ice_osdep.h" #include "ice_osdep.h"
#include "ice_controlq.h" #include "ice_controlq.h"

View File

@ -605,7 +605,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
tun + 1, &prof); tun + 1, &prof);
ret = ice_status_to_errno(status); ret = status;
if (ret) { if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id); flow, vf->vf_id);
@ -615,7 +615,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h); seg, &entry1_h);
ret = ice_status_to_errno(status); ret = status;
if (ret) { if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id); flow, vf->vf_id);
@ -625,7 +625,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h); seg, &entry2_h);
ret = ice_status_to_errno(status); ret = status;
if (ret) { if (ret) {
dev_dbg(dev, dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n", "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
@ -1230,7 +1230,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
ice_fdir_get_prgm_desc(hw, input, &desc, add); ice_fdir_get_prgm_desc(hw, input, &desc, add);
status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
ret = ice_status_to_errno(status); ret = status;
if (ret) { if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type); vf->vf_id, input->flow_type);

View File

@ -862,7 +862,7 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
if (status) { if (status) {
dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
vf->vf_id, status); vf->vf_id, status);
return ice_status_to_errno(status); return status;
} }
vf->num_mac++; vf->num_mac++;
@ -874,7 +874,7 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
&vf->hw_lan_addr.addr[0], vf->vf_id, &vf->hw_lan_addr.addr[0], vf->vf_id,
status); status);
return ice_status_to_errno(status); return status;
} }
vf->num_mac++; vf->num_mac++;
@ -1238,10 +1238,10 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
else else
status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
if (status && status != ICE_ERR_ALREADY_EXISTS) { if (status && status != -EEXIST) {
dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
vf->vf_id, status); vf->vf_id, status);
return ice_status_to_errno(status); return status;
} }
return 0; return 0;
@ -1261,10 +1261,10 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
else else
status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
if (status && status != ICE_ERR_DOES_NOT_EXIST) { if (status && status != -ENOENT) {
dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
vf->vf_id, status); vf->vf_id, status);
return ice_status_to_errno(status); return status;
} }
return 0; return 0;
@ -1758,7 +1758,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
if (status) { if (status) {
dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
vf->vf_id, status); vf->vf_id, status);
err = ice_status_to_errno(status); err = status;
goto release_vsi; goto release_vsi;
} }
@ -2026,7 +2026,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
status = ice_mbx_init_snapshot(&pf->hw, num_vfs); status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
if (status) if (status)
return ice_status_to_errno(status); return status;
err = ice_pci_sriov_ena(pf, num_vfs); err = ice_pci_sriov_ena(pf, num_vfs);
if (err) { if (err) {
@ -2733,12 +2733,12 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs); addl_hdrs);
/* We just ignore ICE_ERR_DOES_NOT_EXIST, because /* We just ignore -ENOENT, because
* if two configurations share the same profile remove * if two configurations share the same profile remove
* one of them actually removes both, since the * one of them actually removes both, since the
* profile is deleted. * profile is deleted.
*/ */
if (status && status != ICE_ERR_DOES_NOT_EXIST) { if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
vf->vf_id, status); vf->vf_id, status);
@ -3802,7 +3802,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
} }
status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) { if (status == -EEXIST) {
dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id); vf->vf_id);
/* don't return since we might need to update /* don't return since we might need to update
@ -3896,7 +3896,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return 0; return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) { if (status == -ENOENT) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id); vf->vf_id);
return -ENOENT; return -ENOENT;