From: Xue <xuechaojing@huawei.com>
commit 2df8a2952eb571e0d412966025e63edb0789baee openEuler-1.0
driver inclusion
category: bugfix
bugzilla: 4472
CVE: NA
------------------------------------------------------------------------
Update the Hi1822 NIC driver from 1.6.2.2 to 1.8.2.8:
1. Support CLP.
2. Problem fixes and reliability enhancements. These changes are too
extensive to describe here; if details are needed, please contact
Xue Chaojing to obtain the release notes.
Signed-off-by: Xue <xuechaojing@huawei.com>
Reviewed-by: chiqijun <chiqijun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Xin Hao <haoxing990@gmail.com>
---
drivers/net/ethernet/huawei/hinic/hinic_cfg.c | 126 ++--
.../net/ethernet/huawei/hinic/hinic_cmdq.c | 52 +-
.../net/ethernet/huawei/hinic/hinic_cmdq.h | 3 +
drivers/net/ethernet/huawei/hinic/hinic_csr.h | 2 +
.../ethernet/huawei/hinic/hinic_dbgtool_knl.c | 23 +-
drivers/net/ethernet/huawei/hinic/hinic_dcb.c | 300 +++++++++-
drivers/net/ethernet/huawei/hinic/hinic_dcb.h | 6 +
.../net/ethernet/huawei/hinic/hinic_dfx_def.h | 4 +-
drivers/net/ethernet/huawei/hinic/hinic_eqs.c | 93 ++-
drivers/net/ethernet/huawei/hinic/hinic_eqs.h | 15 +
drivers/net/ethernet/huawei/hinic/hinic_hw.h | 19 +-
.../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 2 +
.../net/ethernet/huawei/hinic/hinic_hwdev.c | 192 ++++--
.../net/ethernet/huawei/hinic/hinic_hwdev.h | 7 +-
.../net/ethernet/huawei/hinic/hinic_hwif.c | 5 +-
.../net/ethernet/huawei/hinic/hinic_hwif.h | 2 +-
drivers/net/ethernet/huawei/hinic/hinic_lld.c | 257 +++++---
drivers/net/ethernet/huawei/hinic/hinic_lld.h | 6 +-
.../net/ethernet/huawei/hinic/hinic_main.c | 33 +-
.../net/ethernet/huawei/hinic/hinic_mbox.c | 168 ++++--
.../net/ethernet/huawei/hinic/hinic_mbox.h | 21 +-
.../net/ethernet/huawei/hinic/hinic_mgmt.c | 555 +++++++++++++++++-
.../net/ethernet/huawei/hinic/hinic_mgmt.h | 76 ++-
.../huawei/hinic/hinic_mgmt_interface.h | 38 ++
.../huawei/hinic/hinic_multi_host_mgmt.c | 23 +-
.../huawei/hinic/hinic_multi_host_mgmt.h | 1 -
drivers/net/ethernet/huawei/hinic/hinic_nic.h | 1 +
.../net/ethernet/huawei/hinic/hinic_nic_cfg.c | 177 +++++-
.../net/ethernet/huawei/hinic/hinic_nic_cfg.h | 31 +
.../net/ethernet/huawei/hinic/hinic_nic_dev.h | 10 +-
.../net/ethernet/huawei/hinic/hinic_nictool.c | 55 +-
.../net/ethernet/huawei/hinic/hinic_nictool.h | 1 +
.../ethernet/huawei/hinic/hinic_port_cmd.h | 9 +-
.../net/ethernet/huawei/hinic/hinic_qe_def.h | 2 -
drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1 +
.../net/ethernet/huawei/hinic/hinic_sriov.c | 34 ++
.../net/ethernet/huawei/hinic/hinic_sriov.h | 5 +
drivers/net/ethernet/huawei/hinic/hinic_tx.c | 15 +-
drivers/net/ethernet/huawei/hinic/ossl_knl.h | 8 +
.../ethernet/huawei/hinic/ossl_knl_linux.h | 14 +
40 files changed, 2061 insertions(+), 331 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
index 60a2faf30d55..3684f9ab99e0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
@@ -37,7 +37,7 @@
uint intr_mode;
uint timer_enable = 1;
-uint bloomfilter_enable = 1;
+uint bloomfilter_enable;
uint g_test_qpc_num;
uint g_test_qpc_resvd_num;
uint g_test_pagesize_reorder;
@@ -522,26 +522,27 @@ static void parse_dev_cap(struct hinic_hwdev *dev,
if (IS_NIC_TYPE(dev))
parse_l2nic_res_cap(cap, dev_cap, type);
- /* RoCE resource */
- if (IS_ROCE_TYPE(dev))
- parse_roce_res_cap(cap, dev_cap, type);
-
- /* iWARP resource */
- if (IS_IWARP_TYPE(dev))
- parse_iwarp_res_cap(cap, dev_cap, type);
/* FCoE/IOE/TOE/FC without virtulization */
if (type == TYPE_PF || type == TYPE_PPF) {
+ if (IS_FC_TYPE(dev))
+ parse_fc_res_cap(cap, dev_cap, type);
+
if (IS_FCOE_TYPE(dev))
parse_fcoe_res_cap(cap, dev_cap, type);
if (IS_TOE_TYPE(dev))
parse_toe_res_cap(cap, dev_cap, type);
-
- if (IS_FC_TYPE(dev))
- parse_fc_res_cap(cap, dev_cap, type);
}
+ /* RoCE resource */
+ if (IS_ROCE_TYPE(dev))
+ parse_roce_res_cap(cap, dev_cap, type);
+
+ /* iWARP resource */
+ if (IS_IWARP_TYPE(dev))
+ parse_iwarp_res_cap(cap, dev_cap, type);
+
if (IS_OVS_TYPE(dev))
parse_ovs_res_cap(cap, dev_cap, type);
@@ -1445,9 +1446,12 @@ int init_cfg_mgmt(struct hinic_hwdev *dev)
free_interrupt_mem:
kfree(cfg_mgmt->irq_param_info.alloc_info);
+ mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex));
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
free_eq_mem:
kfree(cfg_mgmt->eq_info.eq);
+ mutex_deinit(&cfg_mgmt->eq_info.eq_mutex);
cfg_mgmt->eq_info.eq = NULL;
free_mgmt_mem:
@@ -1726,7 +1730,10 @@ bool hinic_func_for_mgmt(void *hwdev)
if (!hwdev)
return false;
- return !dev->cfg_mgmt->svc_cap.chip_svc_type;
+ if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0)
+ return false;
+ else
+ return true;
}
int cfg_set_func_sf_en(void *hwdev, u32 enbits, u32 enmask)
@@ -2026,7 +2033,7 @@ bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state)
{
struct hinic_hwdev *dev = hwdev;
- if (!hwdev || state > HINIC_HWDEV_ALL_INITED)
+ if (!hwdev || state >= HINIC_HWDEV_MAX_INVAL_INITED)
return false;
return !!test_bit(state, &dev->func_state);
@@ -2132,33 +2139,45 @@ int hinic_init_hwdev(struct hinic_init_para *para)
struct hinic_hwdev *hwdev;
int err;
- hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
- if (!hwdev)
- return -ENOMEM;
+ if (!(*para->hwdev)) {
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
- *para->hwdev = hwdev;
- hwdev->adapter_hdl = para->adapter_hdl;
- hwdev->pcidev_hdl = para->pcidev_hdl;
- hwdev->dev_hdl = para->dev_hdl;
- hwdev->chip_node = para->chip_node;
- hwdev->ppf_hwdev = para->ppf_hwdev;
- sema_init(&hwdev->ppf_sem, 1);
-
- hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE);
- if (!hwdev->chip_fault_stats)
- goto alloc_chip_fault_stats_err;
-
- err = hinic_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base,
- para->db_base_phy, para->db_base,
- para->dwqe_mapping);
- if (err) {
- sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
- goto init_hwif_err;
+ *para->hwdev = hwdev;
+ hwdev->adapter_hdl = para->adapter_hdl;
+ hwdev->pcidev_hdl = para->pcidev_hdl;
+ hwdev->dev_hdl = para->dev_hdl;
+ hwdev->chip_node = para->chip_node;
+ hwdev->ppf_hwdev = para->ppf_hwdev;
+ sema_init(&hwdev->ppf_sem, 1);
+
+ hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE);
+ if (!hwdev->chip_fault_stats)
+ goto alloc_chip_fault_stats_err;
+
+ err = hinic_init_hwif(hwdev, para->cfg_reg_base,
+ para->intr_reg_base,
+ para->db_base_phy, para->db_base,
+ para->dwqe_mapping);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
+ goto init_hwif_err;
+ }
+ } else {
+ hwdev = *para->hwdev;
}
/* detect slave host according to BAR reg */
detect_host_mode_pre(hwdev);
+ if (IS_BMGW_SLAVE_HOST(hwdev) &&
+ (!hinic_get_master_host_mbox_enable(hwdev))) {
+ set_bit(HINIC_HWDEV_NONE_INITED, &hwdev->func_state);
+ sdk_info(hwdev->dev_hdl, "Master host not ready, init hwdev later\n");
+ return (1 << HINIC_HWDEV_ALL_INITED);
+ }
+
err = hinic_os_dep_init(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
@@ -2227,8 +2246,8 @@ int hinic_init_hwdev(struct hinic_init_para *para)
vfree(hwdev->chip_fault_stats);
alloc_chip_fault_stats_err:
+ sema_deinit(&hwdev->ppf_sem);
kfree(hwdev);
-
*para->hwdev = NULL;
return -EFAULT;
@@ -2237,6 +2256,8 @@ int hinic_init_hwdev(struct hinic_init_para *para)
void hinic_free_hwdev(void *hwdev)
{
struct hinic_hwdev *dev = hwdev;
+ enum hinic_hwdev_init_state state = HINIC_HWDEV_ALL_INITED;
+ int flag = 0;
if (!hwdev)
return;
@@ -2252,11 +2273,20 @@ void hinic_free_hwdev(void *hwdev)
free_capability(dev);
}
-
- hinic_uninit_comm_ch(dev);
- free_cfg_mgmt(dev);
- hinic_destroy_heartbeat(dev);
- hinic_os_dep_deinit(dev);
+ while (state > HINIC_HWDEV_NONE_INITED) {
+ if (test_bit(state, &dev->func_state)) {
+ flag = 1;
+ break;
+ }
+ state--;
+ }
+ if (flag) {
+ hinic_uninit_comm_ch(dev);
+ free_cfg_mgmt(dev);
+ hinic_destroy_heartbeat(dev);
+ hinic_os_dep_deinit(dev);
+ }
+ clear_bit(HINIC_HWDEV_NONE_INITED, &dev->func_state);
hinic_free_hwif(dev);
vfree(dev->chip_fault_stats);
sema_deinit(&dev->ppf_sem);
@@ -2291,7 +2321,7 @@ u64 hinic_get_func_feature_cap(void *hwdev)
struct hinic_hwdev *dev = hwdev;
if (!dev) {
- pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ pr_err("Hwdev pointer is NULL for getting function feature capability\n");
return 0;
}
@@ -2303,9 +2333,21 @@ enum hinic_func_mode hinic_get_func_mode(void *hwdev)
struct hinic_hwdev *dev = hwdev;
if (!dev) {
- pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ pr_err("Hwdev pointer is NULL for getting function mode\n");
return 0;
}
return dev->func_mode;
}
+
+enum hinic_service_mode hinic_get_service_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting service mode\n");
+ return HINIC_WORK_MODE_INVALID;
+ }
+
+ return dev->board_info.service_mode;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
index e91c310e1894..5cb7cb37334b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
@@ -897,7 +897,7 @@ static int cmdq_params_valid(void *hwdev, struct hinic_cmd_buf *buf_in)
return -EINVAL;
}
- if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
+ if (!buf_in->size || buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size);
return -EINVAL;
}
@@ -1219,6 +1219,13 @@ void hinic_cmdq_ceq_handler(void *handle, u32 ceqe_data)
if (!WQE_COMPLETED(ctrl_info))
break;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the cmdq wqe until we have
+ * verified the command has been processed and
+ * written back.
+ */
+ dma_rmb();
+
if (cmdq_type == HINIC_CMDQ_ASYNC)
cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
else
@@ -1361,24 +1368,45 @@ int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
return 0;
}
-static void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
- struct hinic_cmdq *cmdq)
+void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq)
{
struct hinic_cmdq_wqe *wqe;
- struct hinic_cmdq_cmd_info *cmd_info;
- u16 ci;
+ struct hinic_cmdq_cmd_info *cmdq_info;
+ u16 ci, wqe_left, i;
+ u64 buf;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta);
+ ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx);
+ for (i = 0; i < wqe_left; i++, ci++) {
+ ci = MASKED_WQE_IDX(cmdq->wq, ci);
+ cmdq_info = &cmdq->cmd_infos[ci];
+
+ if (cmdq_info->cmd_type == HINIC_CMD_TYPE_SET_ARM)
+ continue;
- while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
if (cmdq->cmdq_type == HINIC_CMDQ_ASYNC) {
- cmd_info = &cmdq->cmd_infos[ci];
- if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM)
- clear_wqe_complete_bit(cmdq, wqe, ci);
- else
- cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+ wqe = hinic_get_wqebb_addr(cmdq->wq, ci);
+ buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ wqe->wqe_lcmd.buf_desc.saved_async_buf = 0;
+
+ hinic_be32_to_cpu((void *)&buf, sizeof(u64));
+ if (buf)
+ hinic_free_cmd_buf(hwdev,
+ (struct hinic_cmd_buf *)buf);
} else {
- cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ if (cmdq_info->done) {
+ complete(cmdq_info->done);
+ cmdq_info->done = NULL;
+ cmdq_info->cmpt_code = NULL;
+ cmdq_info->direct_resp = NULL;
+ cmdq_info->errcode = NULL;
+ }
}
}
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
}
int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
index eee7e6e3352c..411a9bd71035 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
@@ -211,4 +211,7 @@ void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxt);
+void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
index 7948e0d1ade9..85c32211540d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
@@ -188,6 +188,8 @@
#define HINIC_IPSU_DIP_SIP_MASK \
((0x1 << HINIC_IPSU_SIP_OFFSET) | (0x1 << HINIC_IPSU_DIP_OFFSET))
+#define HINIC_IPSURX_VXLAN_DPORT_ADDR 0x6d4
+
/* For multi-host mgmt
* 0x75C0: bit0~3: uP write, host mode is bmwg or normal host
* bit4~7: master host ppf write when function initializing
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
index 805a101f6a30..b46b251c427c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
@@ -45,6 +45,8 @@ struct ffm_intr_info {
u32 err_csr_value;
};
+#define DBGTOOL_MSG_MAX_SIZE 2048ULL
+
#define HINIC_SELF_CMD_UP2PF_FFM 0x26
void *g_card_node_array[MAX_CARD_NUM] = {0};
@@ -158,6 +160,10 @@ long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
/* alloc cmd and ack memory*/
size = para->param.api_rd.size;
+ if (para->param.api_rd.size == 0) {
+ pr_err("Read cmd size invalid\n");
+ return -EINVAL;
+ }
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc read cmd mem fail\n");
@@ -228,6 +234,10 @@ long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
/* alloc cmd memory*/
size = para->param.api_wr.size;
+ if (para->param.api_wr.size == 0) {
+ pr_err("Write cmd size invalid\n");
+ return -EINVAL;
+ }
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc write cmd mem fail\n");
@@ -392,6 +402,12 @@ long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
u16 out_size;
u8 pf_id;
+ if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) {
+ pr_err("User data(%d) more than 2KB\n",
+ para->param.msg2up.in_size);
+ return -EFAULT;
+ }
+
pf_id = para->param.msg2up.pf_id;
/* pf at most 16*/
if (pf_id >= 16) {
@@ -405,13 +421,12 @@ long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
}
/* alloc buf_in and buf_out memory, apply for 2K*/
- buf_in = kzalloc(2048ULL, GFP_KERNEL);
+ buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
if (!buf_in) {
pr_err("Alloc buf_in mem fail\n");
return -ENOMEM;
}
-#define DBGTOOL_MSG_MAX_SIZE 2048ULL
buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, 0);
if (!buf_out) {
pr_err("Alloc buf_out mem fail\n");
@@ -842,4 +857,6 @@ void dbgtool_knl_deinit(void *vhwdev, void *chip_node)
unregister_chrdev_region(dbgtool_dev_id, 1);
g_dbgtool_init_flag = 0;
-} /*lint -restore*/
+}
+
+/*lint -restore*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
index 16450ef3c97d..0d1bb2a7ed48 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
@@ -92,6 +92,36 @@ void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev,
dcb_cfg->pfc_state = false;
}
+void hinic_init_ieee_settings(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_ets *ets = &nic_dev->hinic_ieee_ets_default;
+ struct ieee_pfc *pfc = &nic_dev->hinic_ieee_pfc;
+ struct hinic_tc_attr *tc_attr;
+ u8 i;
+
+ memset(ets, 0x0, sizeof(struct ieee_ets));
+ memset(&nic_dev->hinic_ieee_ets, 0x0, sizeof(struct ieee_ets));
+ ets->ets_cap = dcb_cfg->pg_tcs;
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+ tc_attr = &dcb_cfg->tc_cfg[i].path[HINIC_DCB_CFG_TX];
+ ets->tc_tsa[i] = tc_attr->prio_type ?
+ IEEE8021Q_TSA_STRICT : IEEE8021Q_TSA_ETS;
+ ets->tc_tx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i];
+ ets->tc_rx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i];
+ ets->prio_tc[i] = hinic_dcb_get_tc(dcb_cfg,
+ HINIC_DCB_CFG_TX, i);
+ }
+ memcpy(&nic_dev->hinic_ieee_ets, ets, sizeof(struct ieee_ets));
+
+ memset(pfc, 0x0, sizeof(struct ieee_pfc));
+ pfc->pfc_cap = dcb_cfg->pfc_tcs;
+ for (i = 0; i < dcb_cfg->pfc_tcs; i++) {
+ if (dcb_cfg->tc_cfg[i].pfc_en)
+ pfc->pfc_en |= (u8)BIT(i);
+ }
+}
+
static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev,
u8 num_cos, u8 *cos_up)
{
@@ -218,6 +248,8 @@ int hinic_dcb_init(struct hinic_nic_dev *nic_dev)
memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg,
sizeof(nic_dev->save_dcb_cfg));
+ hinic_init_ieee_settings(nic_dev);
+
sema_init(&nic_dev->dcb_sem, 1);
return 0;
@@ -228,9 +260,6 @@ void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev)
struct net_device *netdev = nic_dev->netdev;
u8 prio, tc;
- if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
- return;
-
for (prio = 0; prio < HINIC_DCB_UP_MAX; prio++) {
tc = nic_dev->up_cos[prio];
if (tc == nic_dev->default_cos_id)
@@ -777,6 +806,7 @@ static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev)
static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
{
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
struct hinic_tc_attr *tc_attr;
u8 up_tc[HINIC_DCB_UP_MAX] = {0};
u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
@@ -791,19 +821,34 @@ static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
continue;
cos = nic_dev->up_cos[i];
- tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i);
- tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX];
- up_tc[cos] = tc;
- up_pgid[cos] = tc_attr->pg_id;
- up_bw[cos] = tc_attr->bw_pct;
- up_strict[cos] = tc_attr->prio_type ?
- HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+ if ((nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
+ up_tc[cos] = my_ets->prio_tc[i];
+ up_pgid[cos] = my_ets->prio_tc[i];
+ up_bw[cos] = 100;
+ up_strict[i] =
+ (my_ets->tc_tsa[cos] == IEEE8021Q_TSA_STRICT) ?
+ HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+
+ } else {
+ tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i);
+ tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX];
+ up_tc[cos] = tc;
+ up_pgid[cos] = tc_attr->pg_id;
+ up_bw[cos] = tc_attr->bw_pct;
+ up_strict[cos] = tc_attr->prio_type ?
+ HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+ }
}
hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw);
- for (i = 0; i < HINIC_DCB_PG_MAX; i++)
- pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+ if (nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
+ for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+ pg_bw[i] = my_ets->tc_tx_bw[i];
+ } else {
+ for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+ pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+ }
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid,
@@ -813,15 +858,61 @@ static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid,
up_bw, up_strict);
if (err) {
- hinic_err(nic_dev, drv, "Failed to set ets\n");
+ hinic_err(nic_dev, drv, "Failed to set ets with mode:%d\n",
+ nic_dev->dcbx_cap);
return err;
}
- hinic_info(nic_dev, drv, "Set ets to hw done\n");
+ hinic_info(nic_dev, drv, "Set ets to hw done with mode:%d\n",
+ nic_dev->dcbx_cap);
return 0;
}
+static int hinic_dcbnl_set_df_ieee_cfg(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ieee_ets *ets_default = &nic_dev->hinic_ieee_ets_default;
+ struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+ struct ieee_pfc pfc = {0};
+ int err1 = 0;
+ int err2 = 0;
+ u8 flag = 0;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return 0;
+
+ if (memcmp(my_ets, ets_default, sizeof(struct ieee_ets)))
+ flag |= (u8)BIT(0);
+
+ if (my_pfc->pfc_en)
+ flag |= (u8)BIT(1);
+ if (!flag)
+ return 0;
+
+ err1 = hinic_stop_port_traffic_flow(nic_dev);
+ if (err1)
+ return err1;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ if (flag & BIT(0)) {
+ memcpy(my_ets, ets_default, sizeof(struct ieee_ets));
+ err1 = __set_hw_ets(nic_dev);
+ }
+ if (flag & BIT(1)) {
+ my_pfc->pfc_en = 0;
+ err2 = hinic_dcb_set_pfc(nic_dev->hwdev, false, pfc.pfc_en);
+ if (err2)
+ nicif_err(nic_dev, drv, netdev, "Failed to set pfc\n");
+ }
+
+ hinic_start_port_traffic_flow(nic_dev);
+
+ return (err1 | err2) ? -EINVAL : 0;
+}
+
u8 hinic_dcbnl_set_all(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
@@ -894,6 +985,156 @@ u8 hinic_dcbnl_set_all(struct net_device *netdev)
return state;
}
+static int hinic_dcbnl_ieee_get_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+
+ ets->ets_cap = my_ets->ets_cap;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ return 0;
+}
+
+static int hinic_dcbnl_ieee_set_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+ struct ieee_ets back_ets;
+ int err, i;
+ u8 max_tc = 0;
+ u16 total_bw = 0;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!memcmp(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)) &&
+ !memcmp(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)) &&
+ !memcmp(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)) &&
+ !memcmp(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)))
+ return 0;
+
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++)
+ total_bw += ets->tc_tx_bw[i];
+ if (!total_bw)
+ return -EINVAL;
+
+ for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ }
+ if (max_tc)
+ max_tc++;
+
+ if (max_tc > dcb_cfg->pg_tcs)
+ return -EINVAL;
+
+ max_tc = max_tc ? dcb_cfg->pg_tcs : 0;
+ memcpy(&back_ets, my_ets, sizeof(struct ieee_ets));
+ memcpy(my_ets->tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(my_ets->tc_rx_bw, ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(my_ets->prio_tc, ets->prio_tc, sizeof(ets->prio_tc));
+ memcpy(my_ets->tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ if (max_tc != netdev_get_num_tc(netdev)) {
+ err = hinic_setup_tc(netdev, max_tc);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with max_tc:%d, err:%d\n",
+ max_tc, err);
+ memcpy(my_ets, &back_ets, sizeof(struct ieee_ets));
+ return err;
+ }
+ }
+
+ err = hinic_stop_port_traffic_flow(nic_dev);
+ if (err)
+ return err;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ err = __set_hw_ets(nic_dev);
+
+ hinic_start_port_traffic_flow(nic_dev);
+
+ return err;
+}
+
+static int hinic_dcbnl_ieee_get_pfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->pfc_cap = my_pfc->pfc_cap;
+
+ return 0;
+}
+
+static int hinic_dcbnl_ieee_set_pfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+ int err, i;
+ u8 pfc_map, max_tc;
+ u8 outof_range_pfc = 0;
+ bool pfc_en;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (my_pfc->pfc_en == pfc->pfc_en)
+ return 0;
+
+ pfc_map = pfc->pfc_en & nic_dev->up_valid_bitmap;
+ outof_range_pfc = pfc->pfc_en & (~nic_dev->up_valid_bitmap);
+ if (outof_range_pfc)
+ netif_info(nic_dev, drv, netdev,
+ "pfc setting out of range, 0x%x will be ignored\n",
+ outof_range_pfc);
+
+ err = hinic_stop_port_traffic_flow(nic_dev);
+ if (err)
+ return err;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ pfc_en = pfc_map ? true : false;
+ max_tc = 0;
+ for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+ if (my_ets->prio_tc[i] > max_tc)
+ max_tc = my_ets->prio_tc[i];
+ }
+ pfc_en = max_tc ? pfc_en : false;
+
+ err = hinic_dcb_set_pfc(nic_dev->hwdev, pfc_en, pfc_map);
+ if (err) {
+ hinic_info(nic_dev, drv,
+ "Failed to set pfc to hw with pfc_map:0x%x err:%d\n",
+ pfc_map, err);
+ hinic_start_port_traffic_flow(nic_dev);
+ return err;
+ }
+
+ hinic_start_port_traffic_flow(nic_dev);
+ my_pfc->pfc_en = pfc->pfc_en;
+ hinic_info(nic_dev, drv,
+ "Set pfc successfully with pfc_map:0x%x, pfc_en:%d\n",
+ pfc_map, pfc_en);
+
+ return 0;
+}
+
#ifdef NUMTCS_RETURNS_U8
static u8 hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
#else
@@ -955,10 +1196,13 @@ static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
- if ((mode & DCB_CAP_DCBX_VER_IEEE) ||
+ if (((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
((mode & DCB_CAP_DCBX_LLD_MANAGED) &&
- (!(mode & DCB_CAP_DCBX_HOST))))
+ (!(mode & DCB_CAP_DCBX_HOST)))) {
+ nicif_info(nic_dev, drv, netdev,
+ "Set dcbx failed with invalid mode:%d\n", mode);
return 1;
+ }
if (nic_dev->dcbx_cap == mode)
return 0;
@@ -970,20 +1214,38 @@ static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
DCB_CFG_CHG_PG_RX;
nic_dev->dcb_changes |= mask;
hinic_dcbnl_set_all(netdev);
+ } else if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ if (netdev_get_num_tc(netdev)) {
+ err = hinic_setup_tc(netdev, 0);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with mode:%d\n",
+ mode);
+ return 1;
+ }
+ }
+ hinic_dcbnl_set_df_ieee_cfg(netdev);
} else {
err = hinic_setup_tc(netdev, 0);
if (err) {
- nicif_err(nic_dev, drv, netdev, "Failed to setup tc\n");
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with mode:%d\n", mode);
return 1;
}
}
-
nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode);
return 0;
}
const struct dcbnl_rtnl_ops hinic_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = hinic_dcbnl_ieee_get_ets,
+ .ieee_setets = hinic_dcbnl_ieee_set_ets,
+ .ieee_getpfc = hinic_dcbnl_ieee_get_pfc,
+ .ieee_setpfc = hinic_dcbnl_ieee_set_pfc,
+
+ /*CEE std*/
.getstate = hinic_dcbnl_get_state,
.setstate = hinic_dcbnl_set_state,
.getpermhwaddr = hinic_dcbnl_get_perm_hw_addr,
@@ -1003,6 +1265,8 @@ const struct dcbnl_rtnl_ops hinic_dcbnl_ops = {
.setnumtcs = hinic_dcbnl_setnumtcs,
.getpfcstate = hinic_dcbnl_getpfcstate,
.setpfcstate = hinic_dcbnl_setpfcstate,
+
+ /* DCBX configuration */
.getdcbx = hinic_dcbnl_getdcbx,
.setdcbx = hinic_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
index 62b995759237..0d6c0251d8da 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
@@ -19,6 +19,12 @@
#define HINIC_DCB_CFG_TX 0
#define HINIC_DCB_CFG_RX 1
+/*IEEE8021QAZ Transmission selection algorithm identifiers */
+#define IEEE8021Q_TSA_STRICT 0x0
+#define IEEE8021Q_TSA_CBSHAPER 0x1
+#define IEEE8021Q_TSA_ETS 0x2
+#define IEEE8021Q_TSA_VENDOR 0xFF
+
enum HINIC_DCB_FLAGS {
HINIC_DCB_UP_COS_SETTING,
HINIC_DCB_TRAFFIC_STOPPED,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
index efb17418b15b..ff993528ffaf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
@@ -103,7 +103,9 @@ enum driver_cmd_type {
GET_WIN_STAT = 0x60,
WIN_CSR_READ = 0x61,
WIN_CSR_WRITE = 0x62,
- WIN_API_CMD_RD = 0x63
+ WIN_API_CMD_RD = 0x63,
+
+ VM_COMPAT_TEST = 0xFF
};
enum hinic_nic_link_mode {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
index c025f3e80e8a..3d6dbc700746 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
@@ -245,6 +245,8 @@ int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
aeqs->aeq_hwe_cb[event] = hwe_cb;
+ set_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
return 0;
}
EXPORT_SYMBOL(hinic_aeq_register_hw_cb);
@@ -258,13 +260,17 @@ void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event)
{
struct hinic_aeqs *aeqs;
- if (!hwdev)
+ if (!hwdev || event >= HINIC_MAX_AEQ_EVENTS)
return;
aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
- if (event < HINIC_MAX_AEQ_EVENTS)
- aeqs->aeq_hwe_cb[event] = NULL;
+ clear_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+ while (test_bit(HINIC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event]))
+ usleep_range(900, 1000);
+
+ aeqs->aeq_hwe_cb[event] = NULL;
}
EXPORT_SYMBOL(hinic_aeq_unregister_hw_cb);
@@ -287,6 +293,8 @@ int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
aeqs->aeq_swe_cb[event] = aeq_swe_cb;
+ set_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
return 0;
}
EXPORT_SYMBOL(hinic_aeq_register_swe_cb);
@@ -300,13 +308,17 @@ void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event)
{
struct hinic_aeqs *aeqs;
- if (!hwdev)
+ if (!hwdev || event >= HINIC_MAX_AEQ_SW_EVENTS)
return;
aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
- if (event < HINIC_MAX_AEQ_SW_EVENTS)
- aeqs->aeq_swe_cb[event] = NULL;
+ clear_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+ while (test_bit(HINIC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event]))
+ usleep_range(900, 1000);
+
+ aeqs->aeq_swe_cb[event] = NULL;
}
EXPORT_SYMBOL(hinic_aeq_unregister_swe_cb);
@@ -329,6 +341,8 @@ int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
ceqs->ceq_cb[event] = callback;
+ set_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
return 0;
}
EXPORT_SYMBOL(hinic_ceq_register_cb);
@@ -342,13 +356,17 @@ void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event)
{
struct hinic_ceqs *ceqs;
- if (!hwdev)
+ if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
return;
ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
- if (event < HINIC_MAX_CEQ_EVENTS)
- ceqs->ceq_cb[event] = NULL;
+ clear_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+ while (test_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+ usleep_range(900, 1000);
+
+ ceqs->ceq_cb[event] = NULL;
}
EXPORT_SYMBOL(hinic_ceq_unregister_cb);
@@ -390,8 +408,13 @@ static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
return;
}
- if (ceqs->ceq_cb[event])
+ set_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+ if (ceqs->ceq_cb[event] &&
+ test_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ clear_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
}
/**
@@ -421,6 +444,13 @@ static bool aeq_irq_handler(struct hinic_eq *eq)
if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
return false;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the cmdq wqe until we have
+ * verified the command has been processed and
+ * written back.
+ */
+ dma_rmb();
+
event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
ucode_event = event;
@@ -429,19 +459,31 @@ static bool aeq_irq_handler(struct hinic_eq *eq)
HINIC_STATEFULL_EVENT :
HINIC_STATELESS_EVENT;
aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data));
- if (aeqs->aeq_swe_cb[sw_event]) {
+ set_bit(HINIC_AEQ_SW_CB_RUNNING,
+ &aeqs->aeq_sw_cb_state[sw_event]);
+ if (aeqs->aeq_swe_cb[sw_event] &&
+ test_bit(HINIC_AEQ_SW_CB_REG,
+ &aeqs->aeq_sw_cb_state[sw_event])) {
lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev,
ucode_event,
aeqe_data);
hinic_swe_fault_handler(aeqs->hwdev, lev,
ucode_event, aeqe_data);
}
+ clear_bit(HINIC_AEQ_SW_CB_RUNNING,
+ &aeqs->aeq_sw_cb_state[sw_event]);
} else {
if (event < HINIC_MAX_AEQ_EVENTS) {
size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
- if (aeqs->aeq_hwe_cb[event])
+ set_bit(HINIC_AEQ_HW_CB_RUNNING,
+ &aeqs->aeq_hw_cb_state[event]);
+ if (aeqs->aeq_hwe_cb[event] &&
+ test_bit(HINIC_AEQ_HW_CB_REG,
+ &aeqs->aeq_hw_cb_state[event]))
aeqs->aeq_hwe_cb[event](aeqs->hwdev,
aeqe_pos->aeqe_data, size);
+ clear_bit(HINIC_AEQ_HW_CB_RUNNING,
+ &aeqs->aeq_hw_cb_state[event]);
} else {
sdk_warn(eq->hwdev->dev_hdl,
"Unknown aeq hw event %d\n", event);
@@ -837,10 +879,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num];
eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num];
if (eq->dma_addr_for_free[pg_num] & (eq->page_size - 1)) {
- sdk_warn(eq->hwdev->dev_hdl,
+ sdk_info(eq->hwdev->dev_hdl,
"Address is not aligned to %u-bytes as hardware required\n",
eq->page_size);
- sdk_warn(eq->hwdev->dev_hdl, "Change eq's page size %u\n",
+ sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n",
((eq->page_size) >> 1));
eq->dma_addr[pg_num] = ALIGN
(eq->dma_addr_for_free[pg_num],
@@ -959,9 +1001,12 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
eq->type = type;
eq->eq_len = q_len;
- /* Clear PI and CI, also clear the ARM bit */
- hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
- hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HINIC_AEQ)
+ hinic_hwif_write_reg(eq->hwdev->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+ else
+ set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
eq->cons_idx = 0;
eq->wrapped = 0;
@@ -993,6 +1038,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
goto init_eq_ctrls_err;
}
+ hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
set_eq_cons_idx(eq, HINIC_EQ_ARMED);
if (type == HINIC_AEQ) {
@@ -1265,6 +1311,7 @@ void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
{
+ struct hinic_aeq_elem *aeqe_pos;
struct hinic_eq *eq;
u32 addr, ci, pi;
int q_id;
@@ -1275,8 +1322,10 @@ void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
ci = hinic_hwif_read_reg(hwdev->hwif, addr);
addr = EQ_PROD_IDX_REG_ADDR(eq);
pi = hinic_hwif_read_reg(hwdev->hwif, addr);
- sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%x, pi: 0x%x\n",
- q_id, ci, pi);
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+ sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x,
wrap: %d, desc: 0x%x\n",
+ q_id, ci, pi, work_busy(&eq->aeq_work.work),
+ eq->wrapped, be32_to_cpu(aeqe_pos->desc));
}
}
@@ -1292,8 +1341,10 @@ void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
ci = hinic_hwif_read_reg(hwdev->hwif, addr);
addr = EQ_PROD_IDX_REG_ADDR(eq);
pi = hinic_hwif_read_reg(hwdev->hwif, addr);
- sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%x, sw_ci: 0x%x, pi: 0x%x\n",
- q_id, ci, eq->cons_idx, pi);
+ sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
+ q_id, ci, eq->cons_idx, pi,
+ tasklet_state(&eq->ceq_tasklet),
+ eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq))));
sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n",
jiffies_to_msecs(jiffies - eq->hard_intr_jif));
sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n",
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
index bc87090479a9..102dd189bb93 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
@@ -102,12 +102,21 @@ struct hinic_aeq_elem {
u32 desc;
};
+enum hinic_aeq_cb_state {
+ HINIC_AEQ_HW_CB_REG = 0,
+ HINIC_AEQ_HW_CB_RUNNING,
+ HINIC_AEQ_SW_CB_REG,
+ HINIC_AEQ_SW_CB_RUNNING,
+};
+
struct hinic_aeqs {
struct hinic_hwdev *hwdev;
hinic_aeq_hwe_cb aeq_hwe_cb[HINIC_MAX_AEQ_EVENTS];
hinic_aeq_swe_cb aeq_swe_cb[HINIC_MAX_AEQ_SW_EVENTS];
+ unsigned long aeq_hw_cb_state[HINIC_MAX_AEQ_EVENTS];
+ unsigned long aeq_sw_cb_state[HINIC_MAX_AEQ_SW_EVENTS];
struct hinic_eq aeq[HINIC_MAX_AEQS];
u16 num_aeqs;
@@ -115,11 +124,17 @@ struct hinic_aeqs {
struct workqueue_struct *workq;
};
+enum hinic_ceq_cb_state {
+ HINIC_CEQ_CB_REG = 0,
+ HINIC_CEQ_CB_RUNNING,
+};
+
struct hinic_ceqs {
struct hinic_hwdev *hwdev;
hinic_ceq_event_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
void *ceq_data[HINIC_MAX_CEQ_EVENTS];
+ unsigned long ceq_cb_state[HINIC_MAX_CEQ_EVENTS];
struct hinic_eq ceq[HINIC_MAX_CEQS];
u16 num_ceqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
index 8f0cec464328..9850a8ed6e22 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
@@ -120,6 +120,11 @@ int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
int hinic_ppf_tmr_start(void *hwdev);
int hinic_ppf_tmr_stop(void *hwdev);
+/*CLP*/
+int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
/* FOR windows */
bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx);
@@ -280,13 +285,15 @@ struct card_node {
};
enum hinic_hwdev_init_state {
- HINIC_HWDEV_NONE_INITED,
+ HINIC_HWDEV_NONE_INITED = 0,
+ HINIC_HWDEV_CLP_INITED,
HINIC_HWDEV_AEQ_INITED,
HINIC_HWDEV_MGMT_INITED,
HINIC_HWDEV_MBOX_INITED,
HINIC_HWDEV_CMDQ_INITED,
HINIC_HWDEV_COMM_CH_INITED,
HINIC_HWDEV_ALL_INITED,
+ HINIC_HWDEV_MAX_INVAL_INITED
};
enum hinic_func_mode {
@@ -377,6 +384,15 @@ bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state);
enum hinic_func_mode hinic_get_func_mode(void *hwdev);
u64 hinic_get_func_feature_cap(void *hwdev);
+enum hinic_service_mode {
+ HINIC_WORK_MODE_OVS = 0,
+ HINIC_WORK_MODE_UNKNOWN,
+ HINIC_WORK_MODE_NIC,
+ HINIC_WORK_MODE_INVALID = 0xFF,
+};
+
+enum hinic_service_mode hinic_get_service_mode(void *hwdev);
+
int hinic_slq_init(void *dev, int num_wqs);
void hinic_slq_uninit(void *dev);
int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth,
@@ -718,5 +734,6 @@ int hinic_mbox_ppf_to_vf(void *hwdev,
u16 *out_size, u32 timeout);
int hinic_get_card_present_state(void *hwdev, bool *card_present_state);
+int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 45d181baccd2..d4bb6fab3462 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -524,6 +524,7 @@ struct hinic_micro_log_info {
int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info);
void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info);
+void hinic_disable_mgmt_msg_report(void *hwdev);
void hinic_set_func_deinit_flag(void *hwdev);
void hinic_flush_mgmt_workq(void *hwdev);
@@ -542,5 +543,6 @@ struct hinic_func_nic_state {
int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state);
int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en);
+bool hinic_get_master_host_mbox_enable(void *hwdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
index ffd4104b5a8f..74582e62bf69 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
@@ -789,13 +789,6 @@ void hinic_force_complete_all(void *hwdev)
{
struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
struct hinic_recv_msg *recv_resp_msg;
- struct hinic_cmdq_cmd_info *cmdq_info;
- struct hinic_cmdq *cmdq;
- int pi = 0;
- int ci = 0;
- int delta = 0;
- int i = 0;
- u16 max_index = 0;
set_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
@@ -808,32 +801,11 @@ void hinic_force_complete_all(void *hwdev)
}
}
- if (!hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED))
- goto out;
-
- cmdq = &dev->cmdqs->cmdq[HINIC_CMDQ_SYNC];
- pi = cmdq->wq->prod_idx;
- pi = MASKED_WQE_IDX(cmdq->wq, pi);
- ci = cmdq->wq->cons_idx;
- ci = MASKED_WQE_IDX(cmdq->wq, ci);
- max_index = (cmdq->wq->q_depth) - 1;
- delta = (pi >= ci) ? (pi - ci) : ((max_index - ci) + pi);
-
- for (; i < delta; i++) {
- cmdq_info = &cmdq->cmd_infos[ci];
- spin_lock_bh(&cmdq->cmdq_lock);
- if (cmdq_info->done) {
- complete(cmdq_info->done);
- cmdq_info->done = NULL;
- atomic_add(1, &cmdq->wq->delta);
- cmdq->wq->cons_idx += 1;
- }
- spin_unlock_bh(&cmdq->cmdq_lock);
- ci++;
- ci = MASKED_WQE_IDX(cmdq->wq, ci);
- }
+ /* only flush sync cmdq to avoid blocking remove */
+ if (hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED))
+ hinic_cmdq_flush_cmd(hwdev,
+ &dev->cmdqs->cmdq[HINIC_CMDQ_SYNC]);
-out:
clear_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
}
@@ -1087,6 +1059,32 @@ int hinic_mbox_to_vf(void *hwdev,
}
EXPORT_SYMBOL(hinic_mbox_to_vf);
+int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+
+{
+ struct hinic_hwdev *dev = hwdev;
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!dev->chip_present_flag)
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev))
+ return -EINVAL;
+
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CLP_INITED))
+ return -EPERM;
+
+ err = hinic_pf_clp_to_mgmt(dev, mod, cmd, buf_in,
+ in_size, buf_out, out_size);
+
+ return err;
+}
+
/**
* hinic_cpu_to_be32 - convert data to big endian 32 bit format
* @data: the data to convert
@@ -1333,7 +1331,7 @@ static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
cmdqs->status &= ~HINIC_CMDQ_ENABLE;
- while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT) {
+ while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) {
err = 0;
cmdq_type = HINIC_CMDQ_SYNC;
for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
@@ -2088,6 +2086,9 @@ static int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev)
comm_pf_mbox_handler);
hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
sw_func_pf_mbox_handler);
+ } else {
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ vf_to_pf_handler);
}
set_bit(HINIC_HWDEV_MBOX_INITED, &hwdev->func_state);
@@ -2100,6 +2101,9 @@ static void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev)
hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_FROM_FUNC);
hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_SEND_RSLT);
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
hinic_func_to_func_free(hwdev);
}
@@ -2128,7 +2132,7 @@ static int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
- if (hinic_func_type(hwdev) == TYPE_VF &&
+ if (hinic_func_type(hwdev) == TYPE_VF ||
!FUNC_SUPPORT_MGMT(hwdev))
return; /* VF do not support send msg to mgmt directly */
@@ -2139,6 +2143,33 @@ static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
hinic_pf_to_mgmt_free(hwdev);
}
+static int hinic_comm_clp_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return 0;
+
+ err = hinic_clp_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ set_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic_comm_clp_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return;
+
+ clear_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+ hinic_clp_pf_to_mgmt_free(hwdev);
+}
+
static int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
{
int err;
@@ -2261,11 +2292,18 @@ static int __get_func_misc_info(struct hinic_hwdev *hwdev)
err = hinic_get_board_info(hwdev, &hwdev->board_info);
if (err) {
+ /*For the pf/vf of slave host, return error */
+ if (hinic_pcie_itf_id(hwdev))
+ return err;
+
/* VF can't get board info in early version */
if (!HINIC_IS_VF(hwdev)) {
sdk_err(hwdev->dev_hdl, "Get board info failed\n");
return err;
}
+
+ memset(&hwdev->board_info, 0xff,
+ sizeof(struct hinic_board_info));
}
err = hinic_get_mgmt_version(hwdev, hwdev->mgmt_ver);
@@ -2302,15 +2340,21 @@ int hinic_init_comm_ch(struct hinic_hwdev *hwdev)
int err;
if (IS_BMGW_SLAVE_HOST(hwdev) &&
- (!get_master_host_mbox_enable(hwdev))) {
+ (!hinic_get_master_host_mbox_enable(hwdev))) {
sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
return -EFAULT;
}
+ err = hinic_comm_clp_to_mgmt_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init clp\n");
+ return err;
+ }
+
err = hinic_comm_aeqs_init(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n");
- return err;
+ goto aeqs_init_err;
}
err = hinic_comm_pf_to_mgmt_init(hwdev);
@@ -2421,6 +2465,9 @@ int hinic_init_comm_ch(struct hinic_hwdev *hwdev)
msg_init_err:
hinic_comm_aeqs_free(hwdev);
+aeqs_init_err:
+ hinic_comm_clp_to_mgmt_free(hwdev);
+
return err;
}
@@ -2457,6 +2504,9 @@ static void __uninit_comm_module(struct hinic_hwdev *hwdev,
case HINIC_HWDEV_AEQ_INITED:
hinic_comm_aeqs_free(hwdev);
break;
+ case HINIC_HWDEV_CLP_INITED:
+ hinic_comm_clp_to_mgmt_free(hwdev);
+ break;
default:
break;
}
@@ -3841,13 +3891,25 @@ static void __print_cable_info(struct hinic_hwdev *hwdev,
memcpy(tmp_vendor, info->vendor_name,
sizeof(info->vendor_name));
snprintf(tmp_str, sizeof(tmp_str) - 1,
- "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
- tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
- info->cable_length, info->cable_max_speed);
- if (info->port_type != LINK_PORT_COPPER)
+ "Vendor: %s, %s, length: %um, max_speed: %uGbps",
+ tmp_vendor, port_type, info->cable_length,
+ info->cable_max_speed);
+ if (info->port_type == LINK_PORT_FIBRE ||
+ info->port_type == LINK_PORT_AOC) {
snprintf(tmp_str, sizeof(tmp_str) - 1,
- "%s, Temperature: %u", tmp_str,
- info->cable_temp);
+ "%s, %s, Temperature: %u", tmp_str,
+ info->sfp_type ? "SFP" : "QSFP", info->cable_temp);
+ if (info->sfp_type) {
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, rx power: %uuW, tx power: %uuW",
+ tmp_str, info->power[0], info->power[1]);
+ } else {
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, rx power: %uuw %uuW %uuW %uuW",
+ tmp_str, info->power[0], info->power[1],
+ info->power[2], info->power[3]);
+ }
+ }
sdk_info(hwdev->dev_hdl, "Cable information: %s\n",
tmp_str);
@@ -4836,6 +4898,8 @@ int hinic_read_reg(void *hwdev, u32 reg_addr, u32 *val)
static void hinic_exec_recover_cb(struct hinic_hwdev *hwdev,
struct hinic_fault_recover_info *info)
{
+ sdk_info(hwdev->dev_hdl, "Enter hinic_exec_recover_cb\n");
+
if (!hinic_get_chip_present_flag(hwdev)) {
sdk_err(hwdev->dev_hdl, "Device surprised removed, abort recover\n");
return;
@@ -4972,9 +5036,12 @@ int hinic_set_ip_check(void *hwdev, bool ip_check_ctl)
int ret;
int i;
- if (!hwdev || hinic_func_type(hwdev) == TYPE_VF)
+ if (!hwdev)
return -EINVAL;
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
for (i = 0; i <= HINIC_IPSU_CHANNEL_NUM; i++) {
ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
(HINIC_IPSU_CHANNEL0_ADDR +
@@ -5017,3 +5084,42 @@ int hinic_get_card_present_state(void *hwdev, bool *card_present_state)
return 0;
}
EXPORT_SYMBOL(hinic_get_card_present_state);
+
+void hinic_disable_mgmt_msg_report(void *hwdev)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+
+ hinic_set_pf_status(hw_dev->hwif, HINIC_PF_STATUS_INIT);
+}
+
+int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+ HINIC_IPSURX_VXLAN_DPORT_ADDR, &val);
+ if (ret)
+ return ret;
+
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Update VxLAN UDP dest port: cur port:%u, new port:%u",
+ be32_to_cpu(val), udp_port);
+
+ if (be32_to_cpu(val) == udp_port)
+ return 0;
+
+ udp_port = cpu_to_be32(udp_port);
+ ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+ HINIC_IPSURX_VXLAN_DPORT_ADDR, udp_port);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
index ddec645d493a..e0d76b192080 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
@@ -87,7 +87,7 @@ enum hinic_node_id {
HINIC_NODE_ID_MAX = 22
};
-#define HINIC_HWDEV_INIT_MODES_MASK ((1 << HINIC_HWDEV_ALL_INITED) - 1)
+#define HINIC_HWDEV_INIT_MODES_MASK ((1UL << HINIC_HWDEV_ALL_INITED) - 1)
enum hinic_hwdev_func_state {
HINIC_HWDEV_FUNC_INITED = HINIC_HWDEV_ALL_INITED,
@@ -250,6 +250,7 @@ struct hinic_hwdev {
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
struct hinic_cmdqs *cmdqs;
@@ -345,6 +346,10 @@ int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod,
u8 cmd,
void *buf_in, u16 in_size,
void *buf_out, u16 *out_size, u32 timeout);
+int hinic_pf_send_clp_cmd(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit);
void hinic_fault_work_handler(struct work_struct *work);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
index 2b3f66e30072..def8c2b08eae 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
@@ -523,7 +523,8 @@ static void __print_selftest_reg(struct hinic_hwdev *hwdev)
addr = HINIC_CSR_FUNC_ATTR0_ADDR;
attr0 = hinic_hwif_read_reg(hwdev->hwif, addr);
- if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF)
+ if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF &&
+ !HINIC_AF0_GET(attr0, PCI_INTF_IDX))
sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n",
hinic_hwif_read_reg(hwdev->hwif,
HINIC_SELFTEST_RESULT));
@@ -583,6 +584,8 @@ int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
}
disable_all_msix(hwdev);
+ /* disable mgmt cpu report any event */
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf:
%d\n",
hwif->attr.func_global_idx, hwif->attr.func_type,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
index c9a89e99f246..e5ac81a8eb57 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
@@ -18,7 +18,7 @@
#include "hinic_hwdev.h"
-#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 2000
+#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000
struct hinic_free_db_area {
u32 db_idx[HINIC_DB_MAX_AREAS];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
index 29c27a1d77b2..1f0461564dde 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_lld.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
@@ -90,6 +90,8 @@ MODULE_PARM_DESC(disable_vf_load,
enum {
HINIC_FUNC_IN_REMOVE = BIT(0),
+ HINIC_FUNC_PRB_ERR = BIT(1),
+ HINIC_FUNC_PRB_DELAY = BIT(2),
};
/* Structure pcidev private*/
@@ -127,6 +129,8 @@ struct hinic_pcidev {
unsigned long flag;
struct work_struct slave_nic_work;
+ struct workqueue_struct *slave_nic_init_workq;
+ struct delayed_work slave_nic_init_dwork;
bool nic_cur_enable;
bool nic_des_enable;
};
@@ -1453,7 +1457,7 @@ int hinic_get_device_id(void *hwdev, u16 *dev_id)
return 0;
}
-int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id)
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid)
{
struct card_node *chip_node = NULL;
struct hinic_pcidev *dev;
@@ -1466,6 +1470,7 @@ int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id)
list_for_each_entry(dev, &chip_node->func_list, node) {
if (hinic_physical_port_id(dev->hwdev) == port_id) {
*pf_id = hinic_global_func_id(dev->hwdev);
+ *isvalid = 1;
break;
}
}
@@ -1744,9 +1749,15 @@ static int __set_nic_func_state(struct hinic_pcidev *pci_adapter)
int err;
bool enable_nic;
- hinic_get_func_nic_enable(pci_adapter->hwdev,
- hinic_global_func_id(pci_adapter->hwdev),
- &enable_nic);
+ err = hinic_get_func_nic_enable(pci_adapter->hwdev,
+ hinic_global_func_id
+ (pci_adapter->hwdev),
+ &enable_nic);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to get nic state\n");
+ return err;
+ }
+
if (enable_nic) {
err = attach_uld(pci_adapter, SERVICE_T_NIC,
&g_uld_info[SERVICE_T_NIC]);
@@ -1789,7 +1800,13 @@ static void __multi_host_mgmt(struct hinic_pcidev *dev,
/* find func_idx pci_adapter and disable or enable nic */
lld_dev_hold();
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
- if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
+ continue;
+
+ if (des_dev->init_state <
+ HINIC_INIT_STATE_DBGTOOL_INITED &&
+ !test_bit(HINIC_FUNC_PRB_ERR,
+ &des_dev->flag))
continue;
if (hinic_global_func_id(des_dev->hwdev) !=
@@ -1798,7 +1815,10 @@ static void __multi_host_mgmt(struct hinic_pcidev *dev,
if (des_dev->init_state <
HINIC_INIT_STATE_DBGTOOL_INITED) {
- nic_state->status = 1;
+ nic_state->status =
+ test_bit(HINIC_FUNC_PRB_ERR,
+ &des_dev->flag) ? 1 : 0;
+
break;
}
@@ -1918,10 +1938,20 @@ static int alloc_chip_node(struct hinic_pcidev *pci_adapter)
if (!pci_is_root_bus(pci_adapter->pcidev->bus))
parent_bus_number = pci_adapter->pcidev->bus->parent->number;
- list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
- if (chip_node->dp_bus_num == parent_bus_number) {
- pci_adapter->chip_node = chip_node;
- return 0;
+ if (parent_bus_number != 0) {
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ if (chip_node->dp_bus_num == parent_bus_number) {
+ pci_adapter->chip_node = chip_node;
+ return 0;
+ }
+ }
+ } else if (pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF ||
+ pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF_HV) {
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ if (chip_node) {
+ pci_adapter->chip_node = chip_node;
+ return 0;
+ }
}
}
@@ -2191,6 +2221,32 @@ static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter)
lld_unlock_chip_node();
}
+#ifdef CONFIG_X86
+/**
+ * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma
+ * order register to zero
+ * @pci_adapter: pci_adapter
+ **/
+/*lint -save -e40 */
+void cfg_order_reg(struct hinic_pcidev *pci_adapter)
+{
+ u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56};
+ struct cpuinfo_x86 *cpuinfo;
+ u32 i;
+
+ if (HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+ return;
+
+ cpuinfo = &cpu_data(0);
+ for (i = 0; i < sizeof(cpu_model); i++) {
+ if (cpu_model[i] == cpuinfo->x86_model)
+ hinic_set_pcie_order_cfg(pci_adapter->hwdev);
+ }
+}
+
+/*lint -restore*/
+#endif
+
static int hinic_func_init(struct pci_dev *pdev,
struct hinic_pcidev *pci_adapter)
{
@@ -2214,10 +2270,23 @@ static int hinic_func_init(struct pci_dev *pdev,
sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
return -EFAULT;
} else if (err > 0) {
- sdk_err(&pdev->dev, "Initialize hardware device partitial failed\n");
- hinic_detect_version_compatible(pci_adapter);
- hinic_notify_ppf_reg(pci_adapter);
- pci_adapter->init_state = HINIC_INIT_STATE_HW_PART_INITED;
+ if (err == (1 << HINIC_HWDEV_ALL_INITED) &&
+ pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) {
+ pci_adapter->init_state = HINIC_INIT_STATE_HW_IF_INITED;
+ sdk_info(&pdev->dev,
+ "Initialize hardware device later\n");
+ queue_delayed_work(pci_adapter->slave_nic_init_workq,
+ &pci_adapter->slave_nic_init_dwork,
+ HINIC_SLAVE_NIC_DELAY_TIME);
+ set_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+ } else if (err != (1 << HINIC_HWDEV_ALL_INITED)) {
+ sdk_err(&pdev->dev,
+ "Initialize hardware device partitial failed\n");
+ hinic_detect_version_compatible(pci_adapter);
+ hinic_notify_ppf_reg(pci_adapter);
+ pci_adapter->init_state =
+ HINIC_INIT_STATE_HW_PART_INITED;
+ }
return -EFAULT;
}
@@ -2277,6 +2346,20 @@ static int hinic_func_init(struct pci_dev *pdev,
}
}
#endif
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
+ err = sysfs_create_group(&pdev->dev.kobj, &hinic_attr_group);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to create sysfs group\n");
+ return -EFAULT;
+ }
+ }
+
+#ifdef CONFIG_X86
+ cfg_order_reg(pci_adapter);
+#endif
+
+ sdk_info(&pdev->dev, "Pcie device probed\n");
+ pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED;
return 0;
}
@@ -2285,8 +2368,14 @@ static void hinic_func_deinit(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ /* When function deinit, disable mgmt initiative report events firstly,
+ * then flush mgmt work-queue.
+ */
+ hinic_disable_mgmt_msg_report(pci_adapter->hwdev);
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ hinic_flush_mgmt_workq(pci_adapter->hwdev);
+
hinic_set_func_deinit_flag(pci_adapter->hwdev);
- hinic_flush_mgmt_workq(pci_adapter->hwdev);
if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) {
detach_ulds(pci_adapter);
@@ -2301,36 +2390,18 @@ static void hinic_func_deinit(struct pci_dev *pdev)
}
hinic_notify_ppf_unreg(pci_adapter);
- if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
- hinic_free_hwdev(pci_adapter->hwdev);
-}
-
-#ifdef CONFIG_X86
-/**
- * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma
- * order register to zero
- * @pci_adapter: pci_adapter
- **/
-/*lint -save -e40 */
-void cfg_order_reg(struct hinic_pcidev *pci_adapter)
-{
- u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56};
- struct cpuinfo_x86 *cpuinfo;
- u32 i;
-
- if (HINIC_FUNC_IS_VF(pci_adapter->hwdev))
- return;
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) {
+ /* Remove the current node from node-list first,
+ * then it's safe to free hwdev
+ */
+ lld_lock_chip_node();
+ list_del(&pci_adapter->node);
+ lld_unlock_chip_node();
- cpuinfo = &cpu_data(0);
- for (i = 0; i < sizeof(cpu_model); i++) {
- if (cpu_model[i] == cpuinfo->x86_model)
- hinic_set_pcie_order_cfg(pci_adapter->hwdev);
+ hinic_free_hwdev(pci_adapter->hwdev);
}
}
-/*lint -restore*/
-#endif
-
static void wait_tool_unused(void)
{
u32 loop_cnt = 0;
@@ -2372,18 +2443,20 @@ static void hinic_remove(struct pci_dev *pdev)
return;
sdk_info(&pdev->dev, "Pcie device remove begin\n");
-
+#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
return;
}
+#endif
+ cancel_delayed_work_sync(&pci_adapter->slave_nic_init_dwork);
+ flush_workqueue(pci_adapter->slave_nic_init_workq);
+ destroy_workqueue(pci_adapter->slave_nic_init_workq);
- if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
hinic_detect_hw_present(pci_adapter->hwdev);
- cancel_work_sync(&pci_adapter->slave_nic_work);
-
switch (pci_adapter->init_state) {
case HINIC_INIT_STATE_ALL_INITED:
if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev))
@@ -2399,17 +2472,22 @@ static void hinic_remove(struct pci_dev *pdev)
}
/*lint -fallthrough*/
case HINIC_INIT_STATE_DBGTOOL_INITED:
- case HINIC_INIT_STATE_HW_PART_INITED:
case HINIC_INIT_STATE_HWDEV_INITED:
+ case HINIC_INIT_STATE_HW_PART_INITED:
+ case HINIC_INIT_STATE_HW_IF_INITED:
case HINIC_INIT_STATE_PCI_INITED:
set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag);
wait_tool_unused();
+ lld_lock_chip_node();
+ cancel_work_sync(&pci_adapter->slave_nic_work);
+ lld_unlock_chip_node();
- if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
hinic_func_deinit(pdev);
lld_lock_chip_node();
- list_del(&pci_adapter->node);
+ if (pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+ list_del(&pci_adapter->node);
nictool_k_uninit();
free_chip_node(pci_adapter);
lld_unlock_chip_node();
@@ -2426,6 +2504,54 @@ static void hinic_remove(struct pci_dev *pdev)
sdk_info(&pdev->dev, "Pcie device removed\n");
}
+static void slave_host_init_delay_work(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic_pcidev *pci_adapter = container_of(delay,
+ struct hinic_pcidev, slave_nic_init_dwork);
+ struct pci_dev *pdev = pci_adapter->pcidev;
+ struct card_node *chip_node = pci_adapter->chip_node;
+ int found = 0;
+ struct hinic_pcidev *ppf_pcidev = NULL;
+ int err;
+
+ if (!hinic_get_master_host_mbox_enable(pci_adapter->hwdev)) {
+ queue_delayed_work(pci_adapter->slave_nic_init_workq,
+ &pci_adapter->slave_nic_init_dwork,
+ HINIC_SLAVE_NIC_DELAY_TIME);
+ return;
+ }
+ if (hinic_func_type(pci_adapter->hwdev) == TYPE_PPF) {
+ err = hinic_func_init(pdev, pci_adapter);
+ clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+ if (err)
+ set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+ return;
+ }
+
+ /* Make sure the PPF is initialized first */
+ lld_dev_hold();
+ list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) {
+ if (ppf_pcidev &&
+ hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) {
+ found = 1;
+ break;
+ }
+ }
+ lld_dev_put();
+ if (found && ppf_pcidev->init_state == HINIC_INIT_STATE_ALL_INITED) {
+ err = hinic_func_init(pdev, pci_adapter);
+ clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+ if (err)
+ set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+ return;
+ }
+
+ queue_delayed_work(pci_adapter->slave_nic_init_workq,
+ &pci_adapter->slave_nic_init_dwork,
+ HINIC_SLAVE_NIC_DELAY_TIME);
+}
+
static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hinic_pcidev *pci_adapter;
@@ -2443,7 +2569,8 @@ static int hinic_probe(struct pci_dev *pdev, const struct
pci_device_id *id)
#endif
pci_adapter = pci_get_drvdata(pdev);
-
+ clear_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+ clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
err = mapping_bar(pdev, pci_adapter);
if (err) {
sdk_err(&pdev->dev, "Failed to map bar\n");
@@ -2452,6 +2579,16 @@ static int hinic_probe(struct pci_dev *pdev, const struct
pci_device_id *id)
pci_adapter->id = *id;
INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work);
+ pci_adapter->slave_nic_init_workq =
+ create_singlethread_workqueue(HINIC_SLAVE_NIC_DELAY);
+ if (!pci_adapter->slave_nic_init_workq) {
+ sdk_err(&pdev->dev,
+ "Failed to create work queue:%s\n",
+ HINIC_SLAVE_NIC_DELAY);
+ goto create_nic_delay_work_fail;
+ }
+ INIT_DELAYED_WORK(&pci_adapter->slave_nic_init_dwork,
+ slave_host_init_delay_work);
/* if chip information of pcie function exist,
* add the function into chip
@@ -2480,25 +2617,11 @@ static int hinic_probe(struct pci_dev *pdev, const struct
pci_device_id *id)
if (err)
goto func_init_err;
- if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
- err = sysfs_create_group(&pdev->dev.kobj, &hinic_attr_group);
- if (err) {
- sdk_err(&pdev->dev, "Failed to create sysfs group\n");
- goto sysfs_create_err;
- }
- }
-
-#ifdef CONFIG_X86
- cfg_order_reg(pci_adapter);
-#endif
-
- sdk_info(&pdev->dev, "Pcie device probed\n");
- pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED;
-
return 0;
-sysfs_create_err:
func_init_err:
+ if (!test_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag))
+ set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
return 0;
init_nictool_err:
@@ -2507,7 +2630,7 @@ static int hinic_probe(struct pci_dev *pdev, const struct
pci_device_id *id)
alloc_chip_node_fail:
lld_unlock_chip_node();
unmapping_bar(pci_adapter);
-
+create_nic_delay_work_fail:
map_bar_failed:
hinic_pci_deinit(pdev);
@@ -2573,6 +2696,8 @@ static void hinic_shutdown(struct pci_dev *pdev)
if (pci_adapter)
hinic_shutdown_hwdev(pci_adapter->hwdev);
+
+ pci_disable_device(pdev);
}
#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.h
b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
index da2ad9feb794..004e96f5c93e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_lld.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
@@ -16,6 +16,9 @@
#ifndef HINIC_LLD_H_
#define HINIC_LLD_H_
+#define HINIC_SLAVE_NIC_DELAY "hinic_slave_nic_delay"
+#define HINIC_SLAVE_NIC_DELAY_TIME (5 * HZ)
+
struct hinic_lld_dev {
struct pci_dev *pdev;
void *hwdev;
@@ -24,6 +27,7 @@ struct hinic_lld_dev {
enum hinic_init_state {
HINIC_INIT_STATE_NONE,
HINIC_INIT_STATE_PCI_INITED,
+ HINIC_INIT_STATE_HW_IF_INITED,
HINIC_INIT_STATE_HW_PART_INITED,
HINIC_INIT_STATE_HWDEV_INITED,
HINIC_INIT_STATE_DBGTOOL_INITED,
@@ -83,7 +87,7 @@ void hinic_get_all_chip_id(void *card_id);
void hinic_get_card_info(void *hwdev, void *bufin);
int hinic_get_device_id(void *hwdev, u16 *dev_id);
void get_fc_devname(char *devname);
-int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id);
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
void hinic_tool_cnt_inc(void);
void hinic_tool_cnt_dec(void);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c
b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index b81615eb0aac..9fd3411b6c7d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -48,6 +48,10 @@ static u16 num_qps;
module_param(num_qps, ushort, 0444);
MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)");
+static u16 ovs_num_qps = 16;
+module_param(ovs_num_qps, ushort, 0444);
+MODULE_PARM_DESC(ovs_num_qps, "Number of Queue Pairs in ovs mode
(default=16)");
+
#define DEFAULT_POLL_WEIGHT 64
static unsigned int poll_weight = DEFAULT_POLL_WEIGHT;
module_param(poll_weight, uint, 0444);
@@ -1842,11 +1846,7 @@ static void __update_mac_filter(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
- if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
- netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
- nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
- nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
-
+ if (test_and_clear_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags)) {
hinic_update_mac_filter(nic_dev, &netdev->uc,
&nic_dev->uc_filter_list);
#ifdef NETDEV_HW_ADDR_T_MULTICAST
@@ -1920,6 +1920,13 @@ static void hinic_nic_set_rx_mode(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
+ netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
+ set_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags);
+ nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
+ nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
+ }
+
if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev))
queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
}
@@ -1968,7 +1975,9 @@ static const struct net_device_ops hinic_netdev_ops = {
#else
.ndo_set_vf_tx_rate = hinic_ndo_set_vf_bw,
#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
-
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+#endif
.ndo_get_vf_config = hinic_ndo_get_vf_config,
#endif
@@ -2172,6 +2181,9 @@ static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev)
u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
int i, node, err = 0;
u16 num_cpus = 0;
+ enum hinic_service_mode service_mode =
+ hinic_get_service_mode(nic_dev->hwdev);
+
nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
if (nic_dev->max_qps <= 1) {
@@ -2202,6 +2214,15 @@ static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev)
MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps);
+ /* Reduce the memory footprint in OVS mode.
+ * A VF can't get the board info correctly with an early PF driver.
+ */
+ if ((hinic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
+ service_mode == HINIC_WORK_MODE_OVS &&
+ hinic_func_type(nic_dev->hwdev) != TYPE_VF)
+ MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps,
+ nic_dev->num_qps);
+
for (i = 0; i < (int)num_online_cpus(); i++) {
node = (int)cpu_to_node(i);
if (node == dev_to_node(&nic_dev->pdev->dev))
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
index d397bb620030..bbefb5ef12cb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
@@ -270,6 +270,8 @@ int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
func_to_func->ppf_mbox_cb[mod] = callback;
+ set_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
return 0;
}
@@ -292,6 +294,8 @@ int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
func_to_func->pf_mbox_cb[mod] = callback;
+ set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
return 0;
}
@@ -314,6 +318,8 @@ int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
func_to_func->vf_mbox_cb[mod] = callback;
+ set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
return 0;
}
@@ -336,6 +342,9 @@ int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
func_to_func->pf_recv_from_ppf_mbox_cb[mod] = callback;
+ set_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
return 0;
}
@@ -350,6 +359,12 @@ void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ clear_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
func_to_func->ppf_mbox_cb[mod] = NULL;
}
@@ -364,11 +379,17 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
func_to_func->pf_mbox_cb[mod] = NULL;
}
/**
- * hinic_unregister_ppf_mbox_cb - unregister the mbox callback for vf
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
* @func_to_func: pointer to func_to_func part of the chip
* @mod: specific mod that the callback will handle
* Return:
@@ -378,6 +399,12 @@ void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
func_to_func->vf_mbox_cb[mod] = NULL;
}
@@ -392,11 +419,18 @@ void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ clear_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
func_to_func->pf_recv_from_ppf_mbox_cb[mod] = NULL;
}
-static int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
+int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
{
struct hinic_mbox_func_to_func *func_to_func = handle;
@@ -408,22 +442,32 @@ static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
struct hinic_recv_mbox *recv_mbox,
void *buf_out, u16 *out_size)
{
+ hinic_vf_mbox_cb cb;
+ int ret;
+
if (recv_mbox->mod >= HINIC_MOD_MAX) {
sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod =
%d\n",
recv_mbox->mod);
return -EINVAL;
}
- if (!func_to_func->vf_mbox_cb[recv_mbox->mod]) {
+ set_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+ ret = cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+ } else {
sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not
registered\n");
- return -EINVAL;
+ ret = -EINVAL;
}
- return func_to_func->vf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
- recv_mbox->cmd,
- recv_mbox->mbox,
- recv_mbox->mbox_len,
- buf_out, out_size);
+ clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
}
static int
@@ -431,30 +475,43 @@ recv_pf_from_ppf_handler(struct hinic_mbox_func_to_func
*func_to_func,
struct hinic_recv_mbox *recv_mbox,
void *buf_out, u16 *out_size)
{
- hinic_pf_recv_from_ppf_mbox_cb mbox_callback;
+ hinic_pf_recv_from_ppf_mbox_cb cb;
+ enum hinic_mod_type mod = recv_mbox->mod;
+ int ret;
- if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ if (mod >= HINIC_MOD_MAX) {
sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod =
%d\n",
- recv_mbox->mod);
+ mod);
return -EINVAL;
}
- mbox_callback = func_to_func->pf_recv_from_ppf_mbox_cb[recv_mbox->mod];
- if (!mbox_callback) {
+ set_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ cb = func_to_func->pf_recv_from_ppf_mbox_cb[mod];
+ if (cb && test_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod])) {
+ ret = cb(func_to_func->hwdev, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is
not registered\n");
- return -EINVAL;
+ ret = -EINVAL;
}
- return mbox_callback(func_to_func->hwdev, recv_mbox->cmd,
- recv_mbox->mbox, recv_mbox->mbox_len, buf_out,
- out_size);
+ clear_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ return ret;
}
static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
struct hinic_recv_mbox *recv_mbox,
u8 pf_id, void *buf_out, u16 *out_size)
{
+ hinic_ppf_mbox_cb cb;
u16 vf_id = 0;
+ int ret;
if (recv_mbox->mod >= HINIC_MOD_MAX) {
sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod =
%d\n",
@@ -462,18 +519,25 @@ static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
return -EINVAL;
}
- if (!func_to_func->ppf_mbox_cb[recv_mbox->mod]) {
+ set_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->ppf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PPF_MBOX_CB_REG,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) {
+ ret = cb(func_to_func->hwdev, pf_id, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod =
%d\n",
recv_mbox->mod);
- return -EINVAL;
+ ret = -EINVAL;
}
- return func_to_func->ppf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
- pf_id, vf_id,
- recv_mbox->cmd,
- recv_mbox->mbox,
- recv_mbox->mbox_len,
- buf_out, out_size);
+ clear_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
}
static int
@@ -482,7 +546,9 @@ recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
u16 src_func_idx, void *buf_out,
u16 *out_size)
{
+ hinic_pf_mbox_cb cb;
u16 vf_id = 0;
+ int ret;
if (recv_mbox->mod >= HINIC_MOD_MAX) {
sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod =
%d\n",
@@ -490,16 +556,27 @@ recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
return -EINVAL;
}
- if (!func_to_func->pf_mbox_cb[recv_mbox->mod]) {
+ set_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+ vf_id = src_func_idx -
+ hinic_glb_pf_vf_offset(func_to_func->hwdev);
+ ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not
registered\n",
recv_mbox->mod);
- return -EINVAL;
+ ret = -EINVAL;
}
- vf_id = src_func_idx - hinic_glb_pf_vf_offset(func_to_func->hwdev);
- return func_to_func->pf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
- vf_id, recv_mbox->cmd, recv_mbox->mbox,
- recv_mbox->mbox_len, buf_out, out_size);
+ clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
}
bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
@@ -596,13 +673,12 @@ static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox
*recv_mbox,
return false;
if (seq_id == 0) {
- recv_mbox->sed_id = seq_id;
+ recv_mbox->seq_id = seq_id;
} else {
- if (seq_id != recv_mbox->sed_id + 1) {
- recv_mbox->sed_id = 0;
+ if (seq_id != recv_mbox->seq_id + 1)
return false;
- }
- recv_mbox->sed_id = seq_id;
+ else
+ recv_mbox->seq_id = seq_id;
}
return true;
@@ -654,7 +730,8 @@ static void recv_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
sdk_err(func_to_func->hwdev->dev_hdl,
"Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x,
current id: 0x%x, seg len: 0x%x\n",
- src_func_idx, recv_mbox->sed_id, seq_id, seg_len);
+ src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
return;
}
@@ -671,6 +748,7 @@ static void recv_mbox_handler(struct hinic_mbox_func_to_func
*func_to_func,
recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
HINIC_HWIF_RESPONSE) {
@@ -1389,6 +1467,8 @@ static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
{
int err;
+ mbox_info->seq_id = SEQ_ID_MAX_VAL;
+
mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
if (!mbox_info->mbox)
return -ENOMEM;
@@ -1564,8 +1644,6 @@ int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
prepare_send_mbox(func_to_func);
- hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM, vf_to_pf_handler);
-
return 0;
alloc_wb_status_err:
@@ -1587,16 +1665,16 @@ void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
- hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ /* Destroy the workqueue before freeing the related mbox resources
+ * to avoid illegal resource access
+ */
+ destroy_workqueue(func_to_func->workq);
free_mbox_wb_status(func_to_func);
free_mbox_info(func_to_func->mbox_resp);
free_mbox_info(func_to_func->mbox_send);
-
- destroy_workqueue(func_to_func->workq);
-
spin_lock_deinit(&func_to_func->mbox_lock);
sema_deinit(&func_to_func->mbox_send_sem);
sema_deinit(&func_to_func->msg_send_sem);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
index f5e5c31cfd90..c55d32394f9e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
@@ -60,7 +60,7 @@ struct hinic_recv_mbox {
void *buf_out;
enum hinic_mbox_ack_type ack_type;
struct mbox_msg_info msg_info;
- u8 sed_id;
+ u8 seq_id;
};
struct hinic_send_mbox {
@@ -90,6 +90,17 @@ enum mbox_event_state {
EVENT_END,
};
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+ HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+};
+
struct hinic_mbox_func_to_func {
struct hinic_hwdev *hwdev;
@@ -106,6 +117,11 @@ struct hinic_mbox_func_to_func {
hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
hinic_ppf_mbox_cb ppf_mbox_cb[HINIC_MOD_MAX];
hinic_pf_recv_from_ppf_mbox_cb pf_recv_from_ppf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long ppf_to_pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long ppf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
u8 send_msg_id;
enum mbox_event_state event_flag;
/*lock for mbox event flag*/
@@ -210,4 +226,7 @@ int __hinic_mbox_to_vf(void *hwdev,
enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
index 4a7fd98d1233..5543f1f22bfb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
@@ -39,6 +39,9 @@
#define BUF_OUT_DEFAULT_SIZE 1
#define SEGMENT_LEN 48
+#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC_MSG_TO_MGMT_MAX_LEN, \
+ SEGMENT_LEN) / SEGMENT_LEN)
+
#define MAX_PF_MGMT_BUF_SIZE 2048UL
#define MGMT_MSG_SIZE_MIN 20
@@ -71,9 +74,9 @@
static void pf_to_mgmt_send_event_set(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
int event_flag)
{
- down(&pf_to_mgmt->msg_sem);
+ spin_lock(&pf_to_mgmt->sync_event_lock);
pf_to_mgmt->event_flag = event_flag;
- up(&pf_to_mgmt->msg_sem);
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
}
/**
@@ -97,6 +100,8 @@ int hinic_register_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod,
pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback;
pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle;
+ set_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
return 0;
}
EXPORT_SYMBOL(hinic_register_mgmt_msg_cb);
@@ -110,17 +115,21 @@ void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type
mod)
{
struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
- if (!hwdev)
+ if (!hwdev || mod >= HINIC_MOD_HW_MAX)
return;
pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
if (!pf_to_mgmt)
return;
- if (mod < HINIC_MOD_HW_MAX) {
- pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL;
- pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL;
- }
+ clear_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
+ while (test_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL;
}
EXPORT_SYMBOL(hinic_unregister_mgmt_msg_cb);
@@ -231,6 +240,29 @@ static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
}
+static void clp_prepare_header(struct hinic_hwdev *hwdev,
+ u64 *header, int msg_len,
+ enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ enum hinic_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
/**
* prepare_mgmt_cmd - prepare the mgmt command
* @mgmt_cmd: pointer to the command to prepare
@@ -392,18 +424,15 @@ int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8
cmd,
timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
ret = wait_for_completion_timeout(recv_done, timeo);
- down(&pf_to_mgmt->msg_sem);
if (!ret) {
sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n",
pf_to_mgmt->sync_msg_id);
hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
err = -ETIMEDOUT;
- pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT;
- up(&pf_to_mgmt->msg_sem);
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT);
goto unlock_sync_msg;
}
- pf_to_mgmt->event_flag = SEND_EVENT_END;
- up(&pf_to_mgmt->msg_sem);
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END);
if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) {
destroy_completion(recv_done);
@@ -432,6 +461,410 @@ int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8
cmd,
return err;
}
+static int __get_clp_reg(void *hwdev, enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *reg_addr)
+{
+ struct hinic_hwdev *dev = hwdev;
+ u32 offset;
+
+ offset = HINIC_CLP_REG_GAP * hinic_pcie_itf_id(dev);
+
+ switch (reg_type) {
+ case HINIC_CLP_BA_HOST:
+ *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+ HINIC_CLP_REG(REQ_SRAM_BA) :
+ HINIC_CLP_REG(RSP_SRAM_BA);
+ break;
+
+ case HINIC_CLP_SIZE_HOST:
+ *reg_addr = HINIC_CLP_REG(SRAM_SIZE);
+ break;
+
+ case HINIC_CLP_LEN_HOST:
+ *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+ HINIC_CLP_REG(REQ) : HINIC_CLP_REG(RSP);
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ *reg_addr = HINIC_CLP_REG(REQ);
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ *reg_addr = HINIC_CLP_REG(RSP);
+ break;
+
+ default:
+ *reg_addr = 0;
+ break;
+ }
+ if (*reg_addr == 0)
+ return -EINVAL;
+
+ *reg_addr += offset;
+
+ return 0;
+}
+
+static int hinic_read_clp_reg(struct hinic_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *read_value)
+{
+ int err;
+ u32 reg_addr, reg_value;
+
+ if (data_type == HINIC_CLP_REQ_HOST &&
+ reg_type == HINIC_CLP_READY_RSP_HOST)
+ return -EINVAL;
+ if (data_type == HINIC_CLP_RSP_HOST &&
+ reg_type == HINIC_CLP_START_REQ_HOST)
+ return -EINVAL;
+
+ err = __get_clp_reg(hwdev, data_type, reg_type, &reg_addr);
+ if (err)
+ return err;
+
+ reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HINIC_CLP_BA_HOST:
+ reg_value = ((reg_value >>
+ HINIC_CLP_OFFSET(SRAM_BASE)) &
+ HINIC_CLP_MASK(SRAM_BASE));
+ break;
+
+ case HINIC_CLP_SIZE_HOST:
+ reg_value = ((reg_value >>
+ HINIC_CLP_OFFSET(SRAM_SIZE)) &
+ HINIC_CLP_MASK(SRAM_SIZE));
+ break;
+
+ case HINIC_CLP_LEN_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(LEN)) &
+ HINIC_CLP_MASK(LEN));
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(START)) &
+ HINIC_CLP_MASK(START));
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(READY)) &
+ HINIC_CLP_MASK(READY));
+ break;
+
+ default:
+ break;
+ }
+
+ *read_value = reg_value;
+ return 0;
+}
+
+static int __check_reg_value(enum clp_reg_type reg_type, u32 value)
+{
+ if (reg_type == HINIC_CLP_BA_HOST &&
+ value > HINIC_CLP_SRAM_BASE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_SIZE_HOST &&
+ value > HINIC_CLP_SRAM_SIZE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_LEN_HOST &&
+ value > HINIC_CLP_LEN_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_START_REQ_HOST &&
+ value > HINIC_CLP_START_OR_READY_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_READY_RSP_HOST &&
+ value > HINIC_CLP_START_OR_READY_REG_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hinic_write_clp_reg(struct hinic_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 value)
+{
+ u32 reg_addr, reg_value;
+
+ if (data_type == HINIC_CLP_REQ_HOST &&
+ reg_type == HINIC_CLP_READY_RSP_HOST)
+ return;
+ if (data_type == HINIC_CLP_RSP_HOST &&
+ reg_type == HINIC_CLP_START_REQ_HOST)
+ return;
+
+ if (__check_reg_value(reg_type, value))
+ return;
+
+ if (__get_clp_reg(hwdev, data_type, reg_type, &reg_addr))
+ return;
+
+ reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HINIC_CLP_LEN_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(LEN) << HINIC_CLP_OFFSET(LEN)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(LEN));
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(START) <<
+ HINIC_CLP_OFFSET(START)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(START));
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(READY) <<
+ HINIC_CLP_OFFSET(READY)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(READY));
+ break;
+
+ default:
+ return;
+ }
+
+ hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_value);
+}
+
+static int hinic_read_clp_data(struct hinic_hwdev *hwdev,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+ u32 reg = HINIC_CLP_DATA(RSP);
+ u32 ready, delay_cnt;
+ u32 *ptr = (u32 *)buf_out;
+ u32 temp_out_size = 0;
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, &ready);
+ if (err)
+ return err;
+
+ delay_cnt = 0;
+ while (ready == 0) {
+ usleep_range(9000, 10000);
+ delay_cnt++;
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, &ready);
+ if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX) {
+ sdk_err(hwdev->dev_hdl, "timeout with delay_cnt:%d\n",
+ delay_cnt);
+ return -EINVAL;
+ }
+ }
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_LEN_HOST, &temp_out_size);
+ if (err)
+ return err;
+
+ if (temp_out_size > HINIC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) {
+ sdk_err(hwdev->dev_hdl, "invalid temp_out_size:%d\n",
+ temp_out_size);
+ return -EINVAL;
+ }
+
+ *out_size = (u16)(temp_out_size & 0xffff);
+ for (; temp_out_size > 0; temp_out_size--) {
+ *ptr = hinic_hwif_read_reg(hwdev->hwif, reg);
+ ptr++;
+ reg = reg + 4;
+ }
+
+ hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, (u32)0x0);
+ hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_LEN_HOST, (u32)0x0);
+
+ return 0;
+}
+
+static int hinic_write_clp_data(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size)
+{
+ int err;
+ u32 reg = HINIC_CLP_DATA(REQ);
+ u32 start = 1;
+ u32 delay_cnt = 0;
+ u32 *ptr = (u32 *)buf_in;
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_START_REQ_HOST, &start);
+ if (err)
+ return err;
+
+ while (start == 1) {
+ usleep_range(9000, 10000);
+ delay_cnt++;
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_START_REQ_HOST, &start);
+ if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX)
+ return -EINVAL;
+ }
+
+ hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_LEN_HOST, in_size);
+ hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_START_REQ_HOST, (u32)0x1);
+
+ for (; in_size > 0; in_size--) {
+ hinic_hwif_write_reg(hwdev->hwif, reg, *ptr);
+ ptr++;
+ reg = reg + 4;
+ }
+
+ return 0;
+}
+
+static int hinic_check_clp_init_status(struct hinic_hwdev *hwdev)
+{
+ int err;
+ u32 reg_value = 0;
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_BA_HOST, &reg_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong req ba value:0x%x\n", reg_value);
+ return -EINVAL;
+ }
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_BA_HOST, &reg_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong rsp ba value:0x%x\n", reg_value);
+ return -EINVAL;
+ }
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+ HINIC_CLP_SIZE_HOST, &reg_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong req size\n");
+ return -EINVAL;
+ }
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_SIZE_HOST, &reg_value);
+ if (err || !reg_value) {
+ sdk_err(hwdev->dev_hdl, "Wrong rsp size\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic_clear_clp_data(struct hinic_hwdev *hwdev,
+ enum clp_data_type data_type)
+{
+ u32 reg = (data_type == HINIC_CLP_REQ_HOST) ?
+ HINIC_CLP_DATA(REQ) : HINIC_CLP_DATA(RSP);
+ u32 count = HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST;
+
+ for (; count > 0; count--) {
+ hinic_hwif_write_reg(hwdev->hwif, reg, 0x0);
+ reg = reg + 4;
+ }
+}
+
+int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+ struct hinic_hwdev *dev = hwdev;
+ u64 header;
+ u16 real_size;
+ u8 *clp_msg_buf;
+ int err;
+
+ clp_pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->clp_pf_to_mgmt;
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+ /*4 bytes alignment*/
+ if (in_size % HINIC_CLP_DATA_UNIT_HOST)
+ real_size = (in_size + (u16)sizeof(header)
+ + HINIC_CLP_DATA_UNIT_HOST);
+ else
+ real_size = in_size + (u16)sizeof(header);
+ real_size = real_size / HINIC_CLP_DATA_UNIT_HOST;
+
+ if (real_size >
+ (HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST)) {
+ sdk_err(dev->dev_hdl, "Invalid real_size:%d\n", real_size);
+ return -EINVAL;
+ }
+ down(&clp_pf_to_mgmt->clp_msg_lock);
+
+ err = hinic_check_clp_init_status(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Check clp init status failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return err;
+ }
+
+ hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+ hinic_write_clp_reg(dev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, 0x0);
+
+ /*Send request*/
+ memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+ clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0);
+
+ memcpy(clp_msg_buf, &header, sizeof(header));
+ clp_msg_buf += sizeof(header);
+ memcpy(clp_msg_buf, buf_in, in_size);
+
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+ hinic_clear_clp_data(dev, HINIC_CLP_REQ_HOST);
+ err = hinic_write_clp_data(hwdev,
+ clp_pf_to_mgmt->clp_msg_buf, real_size);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Send clp request failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+ /*Get response*/
+ clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+ memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+ err = hinic_read_clp_data(hwdev, clp_msg_buf, &real_size);
+ hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Read clp response failed\n");
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+ real_size = (u16)((real_size * HINIC_CLP_DATA_UNIT_HOST) & 0xffff);
+ if (real_size <= sizeof(header) ||
+ real_size > HINIC_CLP_INPUT_BUFFER_LEN_HOST) {
+ sdk_err(dev->dev_hdl, "Invalid response size:%d", real_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+ real_size = real_size - sizeof(header);
+ if (real_size != *out_size) {
+ sdk_err(dev->dev_hdl, "Invalid real_size:%d, out_size:%d\n",
+ real_size, *out_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+ return -EINVAL;
+ }
+
+ memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size);
+ up(&clp_pf_to_mgmt->clp_msg_lock);
+
+ return 0;
+}
+
int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size, u32 timeout)
@@ -566,9 +999,16 @@ static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
goto resp;
}
- if (!pf_to_mgmt->recv_mgmt_msg_cb[mod]) {
+ set_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+ if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] ||
+ !test_bit(HINIC_MGMT_MSG_CB_REG,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) {
sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
mod);
+ clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
goto resp;
}
@@ -587,6 +1027,9 @@ static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
cmd, buf_in, in_size,
buf_out, &out_size);
+ clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
resp:
if (!ack_first && need_resp)
__send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
@@ -607,7 +1050,7 @@ static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
if (recv_msg->msg_id & ASYNC_MSG_FLAG)
return;
- down(&pf_to_mgmt->msg_sem);
+ spin_lock(&pf_to_mgmt->sync_event_lock);
if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id &&
pf_to_mgmt->event_flag == SEND_EVENT_START) {
complete(&recv_msg->recv_done);
@@ -620,7 +1063,7 @@ static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
pf_to_mgmt->event_flag);
}
- up(&pf_to_mgmt->msg_sem);
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
}
static void recv_mgmt_msg_work_handler(struct work_struct *work)
@@ -639,6 +1082,24 @@ static void recv_mgmt_msg_work_handler(struct work_struct *work)
kfree(mgmt_work);
}
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->seq_id + 1)
+ return false;
+
+ recv_msg->seq_id = seq_id;
+ }
+
+ return true;
+}
+
/**
* recv_mgmt_msg_handler - handler a message from mgmt cpu
* @pf_to_mgmt: PF to MGMT channel
@@ -651,7 +1112,8 @@ static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
struct hinic_mgmt_msg_handle_work *mgmt_work;
u64 mbox_header = *((u64 *)header);
void *msg_body = header + sizeof(mbox_header);
- u32 seq_id, seq_len;
+ u8 seq_id, seq_len;
+ u32 offset;
u64 dir;
/* Don't need to get anything from hw when cmd is async */
@@ -662,9 +1124,18 @@ static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
seq_len = HINIC_MSG_HEADER_GET(mbox_header, SEG_LEN);
seq_id = HINIC_MSG_HEADER_GET(mbox_header, SEQID);
- seq_id = seq_id * SEGMENT_LEN;
- memcpy((u8 *)recv_msg->msg + seq_id, msg_body, seq_len);
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current
seq_id: 0x%x, seg len: 0x%x\n",
+ recv_msg->seq_id, seq_id, seq_len);
+ /* set seq_id to invalid seq_id */
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+ return;
+ }
+
+ offset = seq_id * SEGMENT_LEN;
+ memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len);
if (!HINIC_MSG_HEADER_GET(mbox_header, LAST))
return;
@@ -675,6 +1146,7 @@ static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt
*pf_to_mgmt,
ASYNC_MGMT_TO_PF);
recv_msg->msg_len = HINIC_MSG_HEADER_GET(mbox_header, MSG_LEN);
recv_msg->msg_id = HINIC_MSG_HEADER_GET(mbox_header, MSG_ID);
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
if (HINIC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
HINIC_MSG_RESPONSE) {
@@ -727,10 +1199,6 @@ void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
is_send_dir = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
HINIC_MSG_DIRECT_SEND) ? true : false;
- /* ignore mgmt initiative report events when function deinit */
- if (test_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state) && is_send_dir)
- return;
-
recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
&pf_to_mgmt->recv_resp_msg_from_mgmt;
@@ -744,6 +1212,8 @@ void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
**/
static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
{
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
if (!recv_msg->msg)
return -ENOMEM;
@@ -850,7 +1320,7 @@ int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
hwdev->pf_to_mgmt = pf_to_mgmt;
pf_to_mgmt->hwdev = hwdev;
spin_lock_init(&pf_to_mgmt->async_msg_lock);
- sema_init(&pf_to_mgmt->msg_sem, 1);
+ spin_lock_init(&pf_to_mgmt->sync_event_lock);
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
pf_to_mgmt->workq = create_singlethread_workqueue(HINIC_MGMT_WQ_NAME);
if (!pf_to_mgmt->workq) {
@@ -880,7 +1350,7 @@ int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
destroy_workqueue(pf_to_mgmt->workq);
create_mgmt_workq_err:
- sema_deinit(&pf_to_mgmt->msg_sem);
+ spin_lock_deinit(&pf_to_mgmt->sync_event_lock);
spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
sema_deinit(&pf_to_mgmt->sync_msg_lock);
kfree(pf_to_mgmt);
@@ -896,10 +1366,13 @@ void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+ /* Destroy the workqueue before freeing the related pf_to_mgmt resources
+ * to avoid illegal resource access
+ */
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
free_msg_buf(pf_to_mgmt);
- destroy_workqueue(pf_to_mgmt->workq);
- sema_deinit(&pf_to_mgmt->msg_sem);
+ spin_lock_deinit(&pf_to_mgmt->sync_event_lock);
spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
sema_deinit(&pf_to_mgmt->sync_msg_lock);
kfree(pf_to_mgmt);
@@ -915,3 +1388,33 @@ void hinic_flush_mgmt_workq(void *hwdev)
hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED))
flush_workqueue(dev->pf_to_mgmt->workq);
}
+
+int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+ clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL);
+ if (!clp_pf_to_mgmt)
+ return -ENOMEM;
+
+ clp_pf_to_mgmt->clp_msg_buf = kzalloc(HINIC_CLP_INPUT_BUFFER_LEN_HOST,
+ GFP_KERNEL);
+ if (!clp_pf_to_mgmt->clp_msg_buf) {
+ kfree(clp_pf_to_mgmt);
+ return -ENOMEM;
+ }
+ sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1);
+
+ hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt;
+
+ return 0;
+}
+
+void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt;
+
+ sema_deinit(&clp_pf_to_mgmt->clp_msg_lock);
+ kfree(clp_pf_to_mgmt->clp_msg_buf);
+ kfree(clp_pf_to_mgmt);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
index 2a9f7ad7cac9..9411d50462bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
@@ -52,6 +52,59 @@
#define HINIC_MGMT_WQ_NAME "hinic_mgmt"
+/*CLP*/
+enum clp_data_type {
+ HINIC_CLP_REQ_HOST = 0,
+ HINIC_CLP_RSP_HOST = 1
+};
+
+enum clp_reg_type {
+ HINIC_CLP_BA_HOST = 0,
+ HINIC_CLP_SIZE_HOST = 1,
+ HINIC_CLP_LEN_HOST = 2,
+ HINIC_CLP_START_REQ_HOST = 3,
+ HINIC_CLP_READY_RSP_HOST = 4
+};
+
+#define HINIC_CLP_REG_GAP (0x20)
+#define HINIC_CLP_INPUT_BUFFER_LEN_HOST (2048UL)
+#define HINIC_CLP_OUTPUT_BUFFER_LEN_HOST (2048UL)
+#define HINIC_CLP_DATA_UNIT_HOST (4UL)
+
+#define HINIC_BAR01_GLOBAL_CTL_OFFSET (0x4000)
+#define HINIC_BAR01_CLP_OFFSET (0x5000)
+
+#define HINIC_CLP_SRAM_SIZE_REG (HINIC_BAR01_GLOBAL_CTL_OFFSET + 0x220)
+#define HINIC_CLP_REQ_SRAM_BA_REG (HINIC_BAR01_GLOBAL_CTL_OFFSET + 0x224)
+#define HINIC_CLP_RSP_SRAM_BA_REG (HINIC_BAR01_GLOBAL_CTL_OFFSET + 0x228)
+#define HINIC_CLP_REQ_REG (HINIC_BAR01_GLOBAL_CTL_OFFSET + 0x22c)
+#define HINIC_CLP_RSP_REG (HINIC_BAR01_GLOBAL_CTL_OFFSET + 0x230)
+#define HINIC_CLP_REG(member) (HINIC_CLP_##member##_REG)
+
+#define HINIC_CLP_REQ_DATA (HINIC_BAR01_CLP_OFFSET)
+#define HINIC_CLP_RSP_DATA (HINIC_BAR01_CLP_OFFSET + 0x1000)
+#define HINIC_CLP_DATA(member) (HINIC_CLP_##member##_DATA)
+
+#define HINIC_CLP_SRAM_SIZE_OFFSET (16)
+#define HINIC_CLP_SRAM_BASE_OFFSET (0)
+#define HINIC_CLP_LEN_OFFSET (0)
+#define HINIC_CLP_START_OFFSET (31)
+#define HINIC_CLP_READY_OFFSET (31)
+#define HINIC_CLP_OFFSET(member) (HINIC_CLP_##member##_OFFSET)
+
+#define HINIC_CLP_SRAM_SIZE_BIT_LEN (0x7ffUL)
+#define HINIC_CLP_SRAM_BASE_BIT_LEN (0x7ffffffUL)
+#define HINIC_CLP_LEN_BIT_LEN (0x7ffUL)
+#define HINIC_CLP_START_BIT_LEN (0x1UL)
+#define HINIC_CLP_READY_BIT_LEN (0x1UL)
+#define HINIC_CLP_MASK(member) (HINIC_CLP_##member##_BIT_LEN)
+
+#define HINIC_CLP_DELAY_CNT_MAX (200UL)
+#define HINIC_CLP_SRAM_SIZE_REG_MAX (0x3ff)
+#define HINIC_CLP_SRAM_BASE_REG_MAX (0x7ffffff)
+#define HINIC_CLP_LEN_REG_MAX (0x3ff)
+#define HINIC_CLP_START_OR_READY_REG_MAX (0x1)
+
enum hinic_msg_direction_type {
HINIC_MSG_DIRECT_SEND = 0,
HINIC_MSG_RESPONSE = 1
@@ -80,6 +133,7 @@ struct hinic_recv_msg {
u16 msg_len;
enum hinic_mod_type mod;
u8 cmd;
+ u8 seq_id;
u16 msg_id;
int async_mgmt_to_pf;
};
@@ -104,6 +158,16 @@ enum comm_pf_to_mgmt_event_state {
SEND_EVENT_END,
};
+enum hinic_mgmt_msg_cb_state {
+ HINIC_MGMT_MSG_CB_REG = 0,
+ HINIC_MGMT_MSG_CB_RUNNING,
+};
+
+struct hinic_clp_pf_to_mgmt {
+ struct semaphore clp_msg_lock;
+ void *clp_msg_buf;
+};
+
struct hinic_msg_pf_to_mgmt {
struct hinic_hwdev *hwdev;
@@ -127,6 +191,7 @@ struct hinic_msg_pf_to_mgmt {
hinic_mgmt_msg_cb recv_mgmt_msg_cb[HINIC_MOD_HW_MAX];
void *recv_mgmt_msg_data[HINIC_MOD_HW_MAX];
+ unsigned long mgmt_msg_cb_state[HINIC_MOD_HW_MAX];
void (*async_msg_cb[HINIC_MOD_HW_MAX])(void *handle,
enum hinic_mgmt_cmd cmd,
@@ -137,8 +202,8 @@ struct hinic_msg_pf_to_mgmt {
struct comm_up_self_msg_info proc;
- /* lock when sending msg */
- struct semaphore msg_sem;
+ /* spinlock when sending msg */
+ spinlock_t sync_event_lock;
enum comm_pf_to_mgmt_event_state event_flag;
};
@@ -172,4 +237,11 @@ int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8
cmd,
int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
u8 cmd, void *buf_in, u16 in_size);
+int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
index f4e460624f43..14efbe1449f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
@@ -122,6 +122,16 @@ struct hinic_port_state {
u16 func_id;
};
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
struct hinic_mtu {
u8 status;
u8 version;
@@ -914,6 +924,34 @@ struct hinic_promsic_info {
u8 rsvd1;
};
+struct hinic_netq_cfg_msg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 netq_en;
+ u8 rsvd;
+};
+
+/* add/del rxq filter msg */
+struct hinic_rq_filter_msg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 qid;
+ u8 filter_type;
+ u8 qflag;/*0:stdq, 1:defq, 2: netq*/
+
+ u8 mac[6];
+ struct {
+ u8 inner_mac[6];
+ u32 vni;
+ } vxlan;
+};
+
int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
index 8b1b67f3fc14..05523596f3a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
@@ -92,14 +92,18 @@ void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool
enable)
enable, reg_val);
}
-bool get_master_host_mbox_enable(struct hinic_hwdev *hwdev)
+bool hinic_get_master_host_mbox_enable(void *hwdev)
{
u32 reg_val;
+ struct hinic_hwdev *dev = hwdev;
- if (!IS_SLAVE_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) == TYPE_VF)
+ if (!hwdev)
+ return false;
+
+ if (!IS_SLAVE_HOST(dev) || HINIC_FUNC_TYPE(dev) == TYPE_VF)
return true;
- reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+ reg_val = hinic_hwif_read_reg(dev->hwif, HINIC_HOST_MODE_ADDR);
return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS);
}
@@ -238,7 +242,7 @@ int __mbox_to_host(struct hinic_hwdev *hwdev, enum hinic_mod_type
mod,
goto release_lock;
}
- if (!get_master_host_mbox_enable(hwdev)) {
+ if (!hinic_get_master_host_mbox_enable(hwdev)) {
sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
err = -EFAULT;
goto release_lock;
@@ -715,6 +719,7 @@ int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state
*state)
u8 host_id = 0;
bool host_enable;
int err;
+ int old_state;
if (!hwdev || !state)
return -EINVAL;
@@ -729,6 +734,7 @@ int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state
*state)
if (!mhost_mgmt || state->func_idx >= HINIC_MAX_FUNCTIONS)
return -EINVAL;
+ old_state = test_bit(state->func_idx, mhost_mgmt->func_nic_en) ? 1 : 0;
if (state->state == HINIC_FUNC_NIC_DEL)
clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
else if (state->state == HINIC_FUNC_NIC_ADD)
@@ -740,6 +746,8 @@ int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state
*state)
if (err) {
sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %d host id, err:
%d\n",
state->func_idx, err);
+ old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) :
+ clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
return -EFAULT;
}
@@ -753,8 +761,11 @@ int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state
*state)
/* notify slave host */
err = set_slave_func_nic_state(hwdev, state->func_idx, state->state);
- if (err)
+ if (err) {
+ old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) :
+ clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
return err;
+ }
return 0;
}
@@ -914,8 +925,6 @@ int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev)
hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK);
hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
- hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
-
kfree(hwdev->mhost_mgmt);
hwdev->mhost_mgmt = NULL;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
index 1e79f71e912e..35715017734d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
@@ -82,7 +82,6 @@ struct hinic_slave_func_nic_state {
};
void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable);
-bool get_master_host_mbox_enable(struct hinic_hwdev *hwdev);
void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable);
void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode);
int rectify_host_mode(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic.h
b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
index a09c7dadd801..be68f3cc5fef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nic.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
@@ -59,6 +59,7 @@ struct vf_data_storage {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
};
struct hinic_nic_cfg {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
index a593c2302bae..56d6da9948c7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
@@ -447,6 +447,125 @@ int hinic_hiovs_del_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
return 0;
}
+int hinic_enable_netq(void *hwdev, u8 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_netq_cfg_msg netq_cfg = {0};
+ u16 out_size = sizeof(netq_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ netq_cfg.func_id = hinic_global_func_id(hwdev);
+ netq_cfg.netq_en = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_NETQ,
+ &netq_cfg, sizeof(netq_cfg),
+ &netq_cfg, &out_size);
+ if (netq_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support enable netq\n");
+ } else if (err || !out_size || netq_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to enable netq, err: %d, status: 0x%x, out
size: 0x%x\n",
+ err, netq_cfg.status, out_size);
+ }
+
+ return err;
+}
+
+int hinic_add_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rq_filter_msg filter_msg = {0};
+ u16 out_size = sizeof(filter_msg);
+ int err;
+
+ if (!hwdev || !filter_info)
+ return -EINVAL;
+
+ switch (filter_info->filter_type) {
+ case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ break;
+ case HINIC_RQ_FILTER_TYPE_VXLAN:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ memcpy(filter_msg.vxlan.inner_mac,
+ filter_info->vxlan.inner_mac, ETH_ALEN);
+ filter_msg.vxlan.vni = filter_info->vxlan.vni;
+ break;
+ default:
+ nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n",
+ filter_info->filter_type);
+ return -EINVAL;
+ }
+
+ filter_msg.filter_type = filter_info->filter_type;
+ filter_msg.func_id = hinic_global_func_id(hwdev);
+ filter_msg.qid = filter_info->qid;
+ filter_msg.qflag = filter_info->qflag;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_RQ_FILTER,
+ &filter_msg, sizeof(filter_msg),
+ &filter_msg, &out_size);
+ if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support add rxq filter\n");
+ } else if (err || !out_size || filter_msg.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to add RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, filter_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+int hinic_del_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rq_filter_msg filter_msg = {0};
+ u16 out_size = sizeof(filter_msg);
+ int err;
+
+ if (!hwdev || !filter_info)
+ return -EINVAL;
+
+ switch (filter_info->filter_type) {
+ case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ break;
+ case HINIC_RQ_FILTER_TYPE_VXLAN:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ memcpy(filter_msg.vxlan.inner_mac,
+ filter_info->vxlan.inner_mac, ETH_ALEN);
+ filter_msg.vxlan.vni = filter_info->vxlan.vni;
+ break;
+ default:
+ nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n",
+ filter_info->filter_type);
+ return -EINVAL;
+ }
+
+ filter_msg.filter_type = filter_info->filter_type;
+ filter_msg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_RQ_FILTER,
+ &filter_msg, sizeof(filter_msg),
+ &filter_msg, &out_size);
+ if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support del rxq filter\n");
+ } else if (err || !out_size || filter_msg.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to delte RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, filter_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id)
{
struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
@@ -2179,7 +2298,12 @@ static int hinic_update_vf_mac_msg_handler(struct hinic_nic_io
*nic_io, u16 vf,
struct hinic_port_mac_update *mac_out = buf_out;
int err;
- if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->new_mac)) {
+ if (!is_valid_ether_addr(mac_in->new_mac)) {
+ nic_err(nic_io->hwdev->dev_hdl, "Update VF MAC is invalid.\n");
+ return -EINVAL;
+ }
+
+ if (vf_info->pf_set_mac) {
nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac.\n");
mac_out->status = HINIC_PF_SET_VF_ALREADY;
*out_size = sizeof(*mac_out);
@@ -2695,6 +2819,10 @@ void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi)
ivi->vlan = vfinfo->pf_vlan;
ivi->qos = vfinfo->pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ ivi->spoofchk = vfinfo->spoofchk;
+#endif
+
#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
ivi->max_tx_rate = vfinfo->max_rate;
ivi->min_tx_rate = vfinfo->min_rate;
@@ -2729,6 +2857,9 @@ void hinic_clear_vf_infos(void *hwdev, u16 vf_id)
if (vf_infos->max_rate)
hinic_set_vf_tx_rate(hwdev, vf_id, 0, 0);
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(hwdev, vf_id, false);
+
memset(vf_infos, 0, sizeof(*vf_infos));
/* set vf_infos to default */
hinic_init_vf_infos(hw_dev->nic_io, HW_VF_ID_TO_OS(vf_id));
@@ -2811,6 +2942,50 @@ int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link)
return 0;
}
+int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = NULL;
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err = 0;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ vf_infos = nic_io->vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg,
+ sizeof(spoofchk_cfg), &spoofchk_cfg,
+ &out_size, 0);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EINVAL;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+bool hinic_vf_info_spoofchk(void *hwdev, int vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ bool spoofchk = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk;
+
+ return spoofchk;
+}
+
static int hinic_set_vf_rate_limit(void *hwdev, u16 vf_id, u32 tx_rate)
{
struct hinic_hwdev *hw_dev = hwdev;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
index 3c68149e57b8..98a3e5320c95 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
@@ -345,6 +345,27 @@ struct hinic_phy_port_stats {
u64 mac_tx_higig2_l3_multicast_pkt_num;
};
+enum hinic_rq_filter_type {
+ HINIC_RQ_FILTER_TYPE_NONE = 0x0,
+ HINIC_RQ_FILTER_TYPE_MAC_ONLY = (1 << 0),
+ HINIC_RQ_FILTER_TYPE_VLAN_ONLY = (1 << 1),
+ HINIC_RQ_FILTER_TYPE_VLANMAC = (1 << 2),
+ HINIC_RQ_FILTER_TYPE_VXLAN = (1 << 3),
+ HINIC_RQ_FILTER_TYPE_GENEVE = (1 << 4),
+};
+
+struct hinic_rq_filter_info {
+ u16 qid;
+ u8 filter_type;/* 1: mac, 8: vxlan */
+ u8 qflag;/*0:stdq, 1:defq, 2: netq*/
+
+ u8 mac[ETH_ALEN];
+ struct {
+ u8 inner_mac[ETH_ALEN];
+ u32 vni;
+ } vxlan;
+};
+
#define HINIC_MGMT_VERSION_MAX_LEN 32
#define HINIC_FW_VERSION_NAME 16
@@ -551,6 +572,10 @@ void hinic_save_pf_link_status(void *hwdev, u8 link);
int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link);
+int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk);
+
+bool hinic_vf_info_spoofchk(void *hwdev, int vf_id);
+
int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate);
int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id);
@@ -588,4 +613,10 @@ int hinic_disable_tx_promisc(void *hwdev);
/* HILINK module */
int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings);
+int hinic_enable_netq(void *hwdev, u8 en);
+int hinic_add_hw_rqfilter(void *hwdev,
+ struct hinic_rq_filter_info *filter_info);
+int hinic_del_hw_rqfilter(void *hwdev,
+ struct hinic_rq_filter_info *filter_info);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
index 39b006c5f20f..fc34ecffdf03 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
@@ -30,7 +30,7 @@
#define HINIC_DRV_NAME "hinic"
#define HINIC_CHIP_NAME "hinic"
-#define HINIC_DRV_VERSION "1.6.2.2"
+#define HINIC_DRV_VERSION "1.8.2.8"
struct vf_data_storage;
#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF)
@@ -44,6 +44,7 @@ enum hinic_flags {
HINIC_BP_ENABLE,
HINIC_SAME_RXTX,
HINIC_INTR_ADAPT,
+ HINIC_UPDATE_MAC_FILTER,
};
#define RX_BUFF_NUM_PER_PAGE 2
@@ -209,6 +210,9 @@ struct hinic_nic_dev {
u8 max_cos;
u8 up_valid_bitmap;
u8 up_cos[HINIC_DCB_UP_MAX];
+ struct ieee_ets hinic_ieee_ets_default;
+ struct ieee_ets hinic_ieee_ets;
+ struct ieee_pfc hinic_ieee_pfc;
struct hinic_dcb_config dcb_cfg;
struct hinic_dcb_config tmp_dcb_cfg;
struct hinic_dcb_config save_dcb_cfg;
@@ -255,11 +259,11 @@ extern struct hinic_uld_info nic_uld_info;
int hinic_open(struct net_device *netdev);
int hinic_close(struct net_device *netdev);
-int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
- u32 in_size, void *buf_out, u32 *out_size);
void hinic_set_ethtool_ops(struct net_device *netdev);
void hinicvf_set_ethtool_ops(struct net_device *netdev);
void hinic_update_num_qps(struct net_device *netdev);
+int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
int hinic_force_port_disable(struct hinic_nic_dev *nic_dev);
int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
index 81e5e7983b82..580c8963720d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -37,6 +37,7 @@
#define HIADM_DEV_NAME "nictool_dev"
#define MAJOR_DEV_NUM 921
+#define HINIC_CMDQ_BUF_MAX_SIZE 2048U
static dev_t g_dev_id = {0};
/*lint -save -e104 -e808*/
@@ -83,6 +84,11 @@ static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
if (nt_msg->module == SEND_TO_UCODE) {
struct hinic_cmd_buf *cmd_buf;
+ if (in_size > HINIC_CMDQ_BUF_MAX_SIZE) {
+ pr_err("Cmdq in size(%u) more than 2KB\n", in_size);
+ return -ENOMEM;
+ }
+
cmd_buf = hinic_alloc_cmd_buf(hwdev);
if (!cmd_buf) {
pr_err("Alloc cmdq cmd buffer failed in %s\n",
@@ -134,6 +140,11 @@ static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
!nt_msg->ucode_cmd.ucode_db.ucode_imm) {
struct hinic_cmd_buf *cmd_buf;
+ if (out_size > HINIC_CMDQ_BUF_MAX_SIZE) {
+ pr_err("Cmdq out size(%u) more than 2KB\n", out_size);
+ return -ENOMEM;
+ }
+
cmd_buf = hinic_alloc_cmd_buf(hwdev);
*buf_out = (void *)cmd_buf;
} else {
@@ -987,11 +998,11 @@ static int get_pf_id(void *hwdev, void *buf_in, u32 in_size,
port_id = *((u32 *)buf_in);
pf_info = (struct hinic_pf_info *)buf_out;
- err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id);
+ err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id,
+ &pf_info->isvalid);
if (err)
return err;
- pf_info->isvalid = 1;
*out_size = sizeof(*pf_info);
return 0;
@@ -1324,7 +1335,7 @@ static int api_csr_read(void *hwdev, struct msg_module *nt_msg,
(u32 *)(((u8 *)buf_out) + offset));
if (ret) {
pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n",
- ret, rd_addr + offset, node_id);
+ ret, node_id, rd_addr + offset);
return ret;
}
offset += 4;
@@ -1398,7 +1409,8 @@ static int send_to_up(void *hwdev, struct msg_module *nt_msg,
{
int ret = 0;
- if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) {
+ if (nt_msg->up_cmd.up_db.up_api_type == API_CMD ||
+ nt_msg->up_cmd.up_db.up_api_type == API_CLP) {
enum hinic_mod_type mod;
u8 cmd;
u32 timeout;
@@ -1407,10 +1419,16 @@ static int send_to_up(void *hwdev, struct msg_module *nt_msg,
cmd = nt_msg->up_cmd.up_db.chipif_cmd;
timeout = get_up_timeout_val(mod, cmd);
- ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd,
- buf_in, (u16)in_size,
- buf_out, (u16 *)out_size,
- timeout);
+
+ if (nt_msg->up_cmd.up_db.up_api_type == API_CMD)
+ ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd,
+ buf_in, (u16)in_size,
+ buf_out, (u16 *)out_size,
+ timeout);
+ else
+ ret = hinic_clp_to_mgmt(hwdev, mod, cmd,
+ buf_in, (u16)in_size,
+ buf_out, (u16 *)out_size);
if (ret) {
pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n",
mod, cmd);
@@ -1522,7 +1540,8 @@ static int send_to_sm(void *hwdev, struct msg_module *nt_msg,
return ret;
}
-static bool is_hwdev_cmd_support(unsigned int mod, char *ifname)
+static bool is_hwdev_cmd_support(unsigned int mod,
+ char *ifname, u32 up_api_type)
{
void *hwdev;
@@ -1536,13 +1555,19 @@ static bool is_hwdev_cmd_support(unsigned int mod, char *ifname)
case SEND_TO_UP:
case SEND_TO_SM:
if (FUNC_SUPPORT_MGMT(hwdev)) {
- if (!hinic_is_hwdev_mod_inited
+ if (up_api_type == API_CLP) {
+ if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_CLP_INITED)) {
+ pr_err("CLP have not initialized\n");
+ return false;
+ }
+ } else if (!hinic_is_hwdev_mod_inited
(hwdev, HINIC_HWDEV_MGMT_INITED)) {
pr_err("MGMT have not initialized\n");
return false;
}
} else if (!hinic_is_hwdev_mod_inited
- (hwdev, HINIC_HWDEV_MBOX_INITED)) {
+ (hwdev, HINIC_HWDEV_MBOX_INITED)) {
pr_err("MBOX have not initialized\n");
return false;
}
@@ -1571,7 +1596,8 @@ static bool is_hwdev_cmd_support(unsigned int mod, char *ifname)
return true;
}
-static bool nictool_k_is_cmd_support(unsigned int mod, char *ifname)
+static bool nictool_k_is_cmd_support(unsigned int mod,
+ char *ifname, u32 up_api_type)
{
enum hinic_init_state init_state =
hinic_get_init_state_by_ifname(ifname);
@@ -1586,7 +1612,7 @@ static bool nictool_k_is_cmd_support(unsigned int mod, char *ifname)
return false;
}
} else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) {
- return is_hwdev_cmd_support(mod, ifname);
+ return is_hwdev_cmd_support(mod, ifname, up_api_type);
} else if ((mod >= HINICADM_OVS_DRIVER &&
mod <= HINICADM_FCOE_DRIVER) ||
mod == SEND_TO_HW_DRIVER) {
@@ -1888,7 +1914,8 @@ static long nictool_k_unlocked_ioctl(struct file *pfile,
nt_msg.msg_formate == GET_CHIP_ID)
get_fc_devname(nt_msg.device_name);
- if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name)) {
+ if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name,
+ nt_msg.up_cmd.up_db.up_api_type)) {
ret = -EFAULT;
goto out_free_lock;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
index 8ba91c760686..26b05bb39d58 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -76,6 +76,7 @@ struct up_cmd_st {
#define API_CMD 0x1
#define API_CHAIN 0x2
+#define API_CLP 0x3
struct msg_module {
char device_name[IFNAMSIZ];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
index 548a242c1c3c..652d6429bbe8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
@@ -101,7 +101,7 @@ enum hinic_port_cmd {
/* 0x4c ~ 0x57 have defined in base line*/
HINIC_PORT_CMD_DISABLE_PROMISIC = 0x4c,
-
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4e,
HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
HINIC_PORT_CMD_GET_BOOT_VERSION,
HINIC_PORT_CMD_GET_MICROCODE_VERSION,
@@ -163,10 +163,17 @@ enum hinic_port_cmd {
/* not defined in base line */
HINIC_PORT_CMD_GET_SFP_INFO = 0xad,
+
+ HINIC_PORT_CMD_SET_NETQ = 0xc1,
+ HINIC_PORT_CMD_ADD_RQ_FILTER = 0xc2,
+ HINIC_PORT_CMD_DEL_RQ_FILTER = 0xc3,
+
HINIC_PORT_CMD_GET_FW_LOG = 0xca,
HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb,
HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc,
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
HINIC_PORT_CMD_SET_IQ_ENABLE = 0xd6,
HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
index 83c0cd7fccdb..5a39ef678826 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
@@ -462,8 +462,6 @@ enum rq_completion_fmt {
RQ_COMPLETE_SGE = 1
};
-#define HINIC_VLAN_FILTER_EN (1U << 0)
-#define HINIC_BROADCAST_FILTER_EX_EN (1U << 1)
#ifdef __cplusplus
#if __cplusplus
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 38f0a0e86eda..e0ce97dec159 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -158,6 +158,7 @@ static int hinic_rx_fill_buffers(struct hinic_rxq *rxq)
if (likely(i)) {
/* Write all the wqes before pi update */
+ wmb();
hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id,
rxq->next_to_update);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index e044b4115dfd..adc9b755c67b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -315,6 +315,40 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
}
#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err = 0;
+ bool cur_spoofchk;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = hinic_vf_info_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ nicif_err(adapter, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade
latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+#endif
+
int hinic_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index 7e8ff3a52dc0..6dc60d9286ec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -54,6 +54,11 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
int hinic_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif
+
int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9c0b13c64a65..276e4492617a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -466,14 +466,12 @@ static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info,
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_inner_network_header_len(skb);
- if (ip.v4->version == 4) {
- ip.v4->tot_len = 0;
+ if (ip.v4->version == 4)
l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
- } else if (ip.v4->version == 6) {
+ else if (ip.v4->version == 6)
l3_type = IPV6_PKT;
- } else {
+ else
l3_type = 0;
- }
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
@@ -506,12 +504,6 @@ static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info,
network_hdr_len = skb_network_header_len(skb);
}
- /* initialize inner IP header fields */
- if (ip.v4->version == 4)
- ip.v4->tot_len = 0;
- else
- ip.v6->payload_len = 0;
-
get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO,
&l3_type, &l4_proto);
@@ -997,6 +989,7 @@ int hinic_tx_poll(struct hinic_txq *txq, int budget)
u16 hw_ci, sw_ci = 0, q_id = txq->q_id;
hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
+ dma_rmb();
sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
do {
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl.h b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
index 0f017a05e55e..7f19deba7d4c 100644
--- a/drivers/net/ethernet/huawei/hinic/ossl_knl.h
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
@@ -22,10 +22,18 @@
#define __WIN_OR_VMWARE__
#endif
+#if defined(__WIN__) || defined(__VMWARE__) || defined(__UEFI__)
+#define __WIN_OR_VMWARE_OR_UEFI__
+#endif
+
#if (defined(__WIN__) || defined(__VMWARE__)) && !defined(__HIFC__)
#define __WIN_OR_VMWARE_AND_NONHIFC__
#endif
+#if defined(__WIN__) || defined(__UEFI__)
+#define __WIN_OR_UEFI__
+#endif
+
#define sdk_err(dev, format, ...) \
dev_err(dev, "[COMM]"format, ##__VA_ARGS__)
#define sdk_warn(dev, format, ...) \
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
index 388e9344813b..28b0bfbd9994 100644
--- a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
@@ -156,6 +156,17 @@ enum ethtool_link_mode_bit_indices {
*/
#define UBUNTU_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, 0) << 8) + (d))
+#ifndef DEEPIN_PRODUCT_VERSION
+#define DEEPIN_PRODUCT_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+
+#ifdef CONFIG_DEEPIN_KERNEL
+#endif
+
+#ifndef DEEPIN_VERSION_CODE
+#define DEEPIN_VERSION_CODE 0
+#endif
+
/* SuSE version macros are the same as Linux kernel version macro. */
#ifndef SLE_VERSION
#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
@@ -474,4 +485,7 @@ int local_atoi(const char *name);
#define sema_deinit(lock)
#define mutex_deinit(lock)
#define rwlock_deinit(lock)
+
+#define tasklet_state(tasklet) ((tasklet)->state)
+
#endif
--
2.31.0