From: Xue <xuechaojing@huawei.com>
commit 746ea35981b1f77e988d48642409d73f0470b3eb openEuler-1.0
driver inclusion
category:bugfix
bugzilla:4472
CVE:NA
------------------------------------------------------------------
Add Hardware Abstraction Layer support (the Hi1822 SDK layer), including:
1. device initialization
2. configuration management
3. basic management and IO infrastructure
4. common NIC and stateful service APIs
5. miscellaneous supporting code
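A minimal usage sketch of the API CMD interface added by this patch
(signatures are taken from hinic_api_cmd.h below; the hwdev pointer and
the HINIC_NODE_ID_MGMT destination are illustrative assumptions, the
real node ids live in hinic_hw.h):

    struct hinic_api_cmd_chain *chains[HINIC_API_CMD_MAX];
    u8 cmd[16] = {0};    /* hypothetical command payload */
    int err;

    err = hinic_api_cmd_init(hwdev, chains);
    if (err)
        return err;

    /* synchronous write on the write-to-mgmt-cpu chain */
    err = hinic_api_cmd_write(chains[HINIC_API_CMD_WRITE_TO_MGMT_CPU],
                              HINIC_NODE_ID_MGMT, cmd, sizeof(cmd));

    hinic_api_cmd_free(chains);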
Signed-off-by: Xue <xuechaojing@huawei.com>
Reviewed-by: chiqijun <chiqijun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Xin Hao <haoxing990@gmail.com>
---
drivers/net/ethernet/huawei/Kconfig | 1 +
drivers/net/ethernet/huawei/hinic/Kconfig | 5 +-
drivers/net/ethernet/huawei/hinic/Makefile | 11 +-
.../net/ethernet/huawei/hinic/hinic_api_cmd.c | 1176 ++++
.../net/ethernet/huawei/hinic/hinic_api_cmd.h | 298 +
drivers/net/ethernet/huawei/hinic/hinic_cfg.c | 2311 ++++++++
drivers/net/ethernet/huawei/hinic/hinic_cfg.h | 531 ++
.../net/ethernet/huawei/hinic/hinic_cmdq.c | 1503 +++++
.../net/ethernet/huawei/hinic/hinic_cmdq.h | 214 +
drivers/net/ethernet/huawei/hinic/hinic_csr.h | 201 +
.../net/ethernet/huawei/hinic/hinic_ctx_def.h | 253 +
drivers/net/ethernet/huawei/hinic/hinic_dbg.h | 90 +
.../net/ethernet/huawei/hinic/hinic_dfx_def.h | 148 +
drivers/net/ethernet/huawei/hinic/hinic_eqs.c | 1302 +++++
drivers/net/ethernet/huawei/hinic/hinic_eqs.h | 148 +
drivers/net/ethernet/huawei/hinic/hinic_hw.h | 722 +++
.../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 587 +-
.../net/ethernet/huawei/hinic/hinic_hwdev.c | 5019 +++++++++++++++++
.../net/ethernet/huawei/hinic/hinic_hwdev.h | 377 ++
.../net/ethernet/huawei/hinic/hinic_hwif.c | 800 +++
.../net/ethernet/huawei/hinic/hinic_hwif.h | 117 +
drivers/net/ethernet/huawei/hinic/hinic_lld.c | 2685 +++++++++
drivers/net/ethernet/huawei/hinic/hinic_lld.h | 116 +
.../net/ethernet/huawei/hinic/hinic_mbox.c | 1605 ++++++
.../net/ethernet/huawei/hinic/hinic_mbox.h | 213 +
.../net/ethernet/huawei/hinic/hinic_mgmt.c | 917 +++
.../net/ethernet/huawei/hinic/hinic_mgmt.h | 175 +
.../huawei/hinic/hinic_mgmt_interface.h | 919 +++
.../ethernet/huawei/hinic/hinic_msix_attr.c | 130 +
.../ethernet/huawei/hinic/hinic_msix_attr.h | 69 +
.../huawei/hinic/hinic_multi_host_mgmt.c | 923 +++
.../huawei/hinic/hinic_multi_host_mgmt.h | 94 +
drivers/net/ethernet/huawei/hinic/hinic_nic.h | 109 +
.../net/ethernet/huawei/hinic/hinic_nic_cfg.c | 3489 ++++++++++++
.../net/ethernet/huawei/hinic/hinic_nic_cfg.h | 591 ++
.../net/ethernet/huawei/hinic/hinic_nic_dbg.c | 281 +
.../net/ethernet/huawei/hinic/hinic_nic_io.c | 993 ++++
.../net/ethernet/huawei/hinic/hinic_nic_io.h | 96 +
.../ethernet/huawei/hinic/hinic_port_cmd.h | 536 ++
.../net/ethernet/huawei/hinic/hinic_qe_def.h | 473 ++
.../net/ethernet/huawei/hinic/hinic_sm_lt.h | 228 +
.../ethernet/huawei/hinic/hinic_sml_counter.c | 301 +
.../ethernet/huawei/hinic/hinic_sml_counter.h | 118 +
.../net/ethernet/huawei/hinic/hinic_sml_lt.c | 238 +
.../ethernet/huawei/hinic/hinic_sml_table.h | 2728 +++++++++
.../huawei/hinic/hinic_sml_table_pub.h | 277 +
drivers/net/ethernet/huawei/hinic/hinic_wq.c | 687 +++
drivers/net/ethernet/huawei/hinic/hinic_wq.h | 117 +
drivers/net/ethernet/huawei/hinic/ossl_knl.h | 47 +
.../ethernet/huawei/hinic/ossl_knl_linux.c | 138 +
.../ethernet/huawei/hinic/ossl_knl_linux.h | 477 ++
.../net/ethernet/huawei/hinic/ossl_types.h | 41 +
52 files changed, 35522 insertions(+), 103 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cfg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cfg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_csr.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_eqs.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_eqs.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwif.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwif.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_lld.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_lld.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mbox.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mbox.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_wq.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_wq.h
create mode 100644 drivers/net/ethernet/huawei/hinic/ossl_knl.h
create mode 100644 drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c
create mode 100644 drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
create mode 100644 drivers/net/ethernet/huawei/hinic/ossl_types.h
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c1a95ae4058b..dc48e5163531 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -5,6 +5,7 @@
config NET_VENDOR_HUAWEI
bool "Huawei devices"
default y
+ depends on ARM64
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig
index e4e8b24c1a5d..312b9d637d17 100644
--- a/drivers/net/ethernet/huawei/hinic/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic/Kconfig
@@ -4,9 +4,10 @@
config HINIC
tristate "Huawei Intelligent PCIE Network Interface Card"
- depends on (PCI_MSI && (X86 || ARM64))
+ default n
+ depends on PCI_MSI && NUMA && PCI_IOV && DCB
---help---
This driver supports HiNIC PCIE Ethernet cards.
To compile this driver as part of the kernel, choose Y here.
If unsure, choose N.
- The default is compiled as module.
+ The default is N.
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 289ce88bb2d0..09569d05c196 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -1,6 +1,9 @@
obj-$(CONFIG_HINIC) += hinic.o
-hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
- hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
- hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o
+hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \
+ hinic_hwif.o hinic_msix_attr.o hinic_eqs.o \
+ hinic_mbox.o hinic_api_cmd.o hinic_mgmt.o \
+ hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \
+ ossl_knl_linux.o \
+ hinic_sml_counter.o hinic_sml_lt.o \
+ hinic_multi_host_mgmt.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
new file mode 100644
index 000000000000..de190ba06064
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/semaphore.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_api_cmd.h"
+
+#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U
+
+#define API_CMD_CELL_DESC_SIZE 8
+#define API_CMD_CELL_DATA_ADDR_SIZE 8
+
+#define API_CHAIN_NUM_CELLS 32
+#define API_CHAIN_CELL_SIZE 128
+#define API_CHAIN_RSP_DATA_SIZE 128
+
+#define API_CMD_CELL_WB_ADDR_SIZE 8
+
+#define API_CHAIN_CELL_ALIGNMENT 8
+
+#define API_CMD_TIMEOUT 10000
+#define API_CMD_STATUS_TIMEOUT 100000
+
+#define API_CMD_BUF_SIZE 2048ULL
+
+#define API_CMD_NODE_ALIGN_SIZE 512ULL
+#define API_PAYLOAD_ALIGN_SIZE 64ULL
+
+#define API_CHAIN_RESP_ALIGNMENT 64ULL
+
+#define COMPLETION_TIMEOUT_DEFAULT 1000UL
+#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U
+
+#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val)))
+
+#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token))
+#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16)
+
+#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
+
+#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2)
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3)
+
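+/* Illustrative examples (not part of the code): with num_cells = 32,
+ * MASKED_IDX(chain, 33) wraps to 1; SIZE_8BYTES(10) aligns 10 up to 16
+ * bytes and returns the length in 8-byte units, i.e. 2.
+ */
+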
+enum api_cmd_data_format {
+ SGL_DATA = 1,
+};
+
+enum api_cmd_type {
+ API_CMD_WRITE_TYPE = 0,
+ API_CMD_READ_TYPE = 1,
+};
+
+enum api_cmd_bypass {
+ NOT_BYPASS = 0,
+ BYPASS = 1,
+};
+
+enum api_cmd_resp_aeq {
+ NOT_TRIGGER = 0,
+ TRIGGER = 1,
+};
+
+static u8 xor_chksum_set(void *data)
+{
+ int idx;
+ u8 checksum = 0;
+ u8 *val = data;
+
+ for (idx = 0; idx < 7; idx++)
+ checksum ^= val[idx];
+
+ return checksum;
+}
+
+static void set_prod_idx(struct hinic_api_cmd_chain *chain)
+{
+ enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
+ u32 prod_idx = chain->prod_idx;
+
+ hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx);
+}
+
+static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+}
+
+static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm:
0x%x\n",
+ chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR),
+ HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
+ HINIC_API_CMD_STATUS_GET(val, FSM));
+
+ sdk_err(dev, "Chain hw current ci: 0x%x\n",
+ HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+ sdk_err(dev, "Chain hw current pi: 0x%x\n", val);
+}
+
+/**
+ * chain_busy - check if the chain is still processing last requests
+ * @chain: chain to check
+ **/
+static int chain_busy(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+ u64 resp_header;
+
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_MULTI_READ:
+ case HINIC_API_CMD_POLL_READ:
+ resp_header = be64_to_cpu(ctxt->resp->header);
+ if (ctxt->status &&
+ !HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+ sdk_err(dev, "Context(0x%x) busy!, pi: %d, resp_header: 0x%08x%08x\n",
+ ctxt->status, chain->prod_idx,
+ upper_32_bits(resp_header),
+ lower_32_bits(resp_header));
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ chain->cons_idx = get_hw_cons_idx(chain);
+
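+ /* The chain keeps one cell empty: it is full when the next
+ * producer slot would reach the HW consumer index.
+ */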
+ if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) {
+ sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ default:
+ sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * get_cell_data_size - get the data size of a specific cell type
+ * @type: chain type
+ * @cmd_size: the command size
+ **/
+static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, u16 cmd_size)
+{
+ u16 cell_data_size = 0;
+
+ switch (type) {
+ case HINIC_API_CMD_POLL_READ:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_WB_ADDR_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+ default:
+ break;
+ }
+
+ return cell_data_size;
+}
+
+/**
+ * prepare_cell_ctrl - prepare the ctrl of the cell for the command
+ * @cell_ctrl: pointer to the cell control field to fill
+ * @cell_len: the size of the cell
+ **/
+static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len)
+{
+ u64 ctrl;
+ u8 chksum;
+
+ ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF);
+
+ chksum = xor_chksum_set(&ctrl);
+
+ ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ *cell_ctrl = cpu_to_be64(ctrl);
+}
+
+/**
+ * prepare_api_cmd - prepare API CMD command
+ * @chain: chain for the command
+ * @cell: the cell of the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ u32 priv;
+
+ cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_POLL_READ:
+ priv = READ_API_CMD_PRIV_DATA(chain->chain_type,
+ cell_ctxt->saved_prod_idx);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ default:
+ sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n",
+ chain->chain_type);
+ return;
+ }
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
+ HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+ XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ cell->desc = cpu_to_be64(cell->desc);
+
+ memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current producer cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_cell(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell *curr_node;
+ u16 cell_size;
+
+ curr_node = chain->curr_node;
+
+ cell_size = get_cell_data_size(chain->chain_type, cmd_size);
+
+ prepare_cell_ctrl(&curr_node->ctrl, cell_size);
+ prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
+{
+ chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+static void issue_api_cmd(struct hinic_api_cmd_chain *chain)
+{
+ set_prod_idx(chain);
+}
+
+/**
+ * api_cmd_status_update - update the status of the chain
+ * @chain: chain to update
+ **/
+static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_status *wb_status;
+ enum hinic_api_cmd_chain_type chain_type;
+ u64 status_header;
+ u32 buf_desc;
+
+ wb_status = chain->wb_status;
+
+ buf_desc = be32_to_cpu(wb_status->buf_desc);
+ if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR))
+ return;
+
+ status_header = be64_to_cpu(wb_status->header);
+ chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+ if (chain_type >= HINIC_API_CMD_MAX)
+ return;
+
+ if (chain_type != chain->chain_type)
+ return;
+
+ chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to mgmt command to complete
+ * @chain: the chain of the command
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
+{
+ int err = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < API_CMD_STATUS_TIMEOUT &&
+ chain->hwdev->chip_present_flag) {
+ api_cmd_status_update(chain);
+
+ /* a sync API CMD must start after the previous cmd has finished */
+ if (chain->cons_idx == chain->prod_idx) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(50, 100);
+ cnt++;
+ }
+
+ return err;
+}
+
+static void copy_resp_data(struct hinic_api_cmd_cell_ctxt *ctxt, void *ack,
+ u16 ack_size)
+{
+ struct hinic_api_cmd_resp_fmt *resp = ctxt->resp;
+
+ memcpy(ack, &resp->resp_data, ack_size);
+ ctxt->status = 0;
+}
+
+/**
+ * wait_for_resp_polling - poll for the response data of a read api-command
+ * @ctxt: cell context of the command
+ *
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_resp_polling(struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+ u64 resp_header;
+ int ret = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) {
+ resp_header = be64_to_cpu(ctxt->resp->header);
+
+ rmb(); /* read the latest header */
+
+ if (HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+ ret = 0;
+ break;
+ }
+ usleep_range(100, 1000);
+ cnt++;
+ }
+
+ if (ret)
+ pr_err("Wait for api chain response timeout\n");
+
+ return ret;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ * @ctxt: cell context of the command
+ * @ack: buffer for the response data
+ * @ack_size: size of the response buffer
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell_ctxt *ctxt,
+ void *ack, u16 ack_size)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ int err = 0;
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_POLL_READ:
+ err = wait_for_resp_polling(ctxt);
+ if (!err)
+ copy_resp_data(ctxt, ack, ack_size);
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ err = wait_for_status_poll(chain);
+ if (err) {
+ sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n",
+ chain->chain_type);
+ break;
+ }
+ break;
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ /* No need to wait */
+ break;
+ default:
+ sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+ chain->chain_type);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ dump_api_chain_reg(chain);
+
+ return err;
+}
+
+static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+ ctxt->status = 1;
+ ctxt->saved_prod_idx = chain->prod_idx;
+ if (ctxt->resp) {
+ ctxt->resp->header = 0;
+
+ /* make sure "header" was cleared */
+ wmb();
+ }
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ * @ack: buffer for the response data
+ * @ack_size: size of the response buffer
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size, void *ack, u16 ack_size)
+{
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock(&chain->async_lock);
+ else
+ down(&chain->sem);
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+ if (chain_busy(chain)) {
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+ return -EBUSY;
+ }
+ update_api_cmd_ctxt(chain, ctxt);
+
+ prepare_cell(chain, dest, cmd, cmd_size);
+
+ cmd_chain_prod_idx_inc(chain);
+
+ wmb(); /* make the cell visible to HW before updating the producer index */
+
+ issue_api_cmd(chain);
+
+ /* incremented prod idx, update ctxt */
+
+ chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+
+ return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
+}
+
+/**
+ * hinic_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size)
+{
+ return api_cmd(chain, dest, cmd, size, NULL, 0);
+}
+
+int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 size, void *ack, u16 ack_size)
+{
+ return api_cmd(chain, dest, cmd, size, ack, ack_size);
+}
+
+/**
+ * api_cmd_hw_restart - restart the chain in the HW
+ * @cmd_chain: the API CMD specific chain to restart
+ **/
+static int api_cmd_hw_restart(struct hinic_api_cmd_chain *cmd_chain)
+{
+ struct hinic_hwif *hwif = cmd_chain->hwdev->hwif;
+ u32 reg_addr, val;
+ int err;
+ u32 cnt = 0;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type);
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
+ val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
+
+ hinic_hwif_write_reg(hwif, reg_addr, val);
+
+ err = -ETIMEDOUT;
+ while (cnt < API_CMD_TIMEOUT) {
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_ctrl_init - set the control register of a chain
+ * @chain: the API CMD specific chain to set control register for
+ **/
+static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 reg_addr, ctrl;
+ u32 size;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT);
+
+ ctrl = hinic_hwif_read_reg(hwif, reg_addr);
+
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) |
+ HINIC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, reg_addr, ctrl);
+}
+
+/**
+ * api_cmd_set_status_addr - set the status address of a chain in the HW
+ * @chain: the API CMD specific chain to set status address for
+ **/
+static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_set_num_cells - set the number of cells of a chain in the HW
+ * @chain: the API CMD specific chain to set the number of cells for
+ **/
+static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
+ val = chain->num_cells;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_head_init - set the head cell of a chain in the HW
+ * @chain: the API CMD specific chain to set the head for
+ **/
+static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * wait_for_ready_chain - wait for the chain to be ready
+ * @chain: the API CMD specific chain to wait for
+ * Return: 0 - success, negative - failure
+ **/
+static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+ u32 hw_cons_idx;
+ u32 cnt = 0;
+ int err;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ err = -ETIMEDOUT;
+ while (cnt < API_CMD_TIMEOUT) {
+ val = hinic_hwif_read_reg(hwif, addr);
+ hw_cons_idx = HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+
+ /* wait for HW cons idx to be updated */
+ if (hw_cons_idx == chain->cons_idx) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_chain_hw_clean - clean the HW
+ * @chain: the API CMD specific chain
+ **/
+static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, ctrl;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ ctrl = hinic_hwif_read_reg(hwif, addr);
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_chain_hw_init - initialize the chain in the HW
+ * @chain: the API CMD specific chain to initialize in HW
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
+{
+ api_cmd_chain_hw_clean(chain);
+
+ api_cmd_set_status_addr(chain);
+
+ if (api_cmd_hw_restart(chain)) {
+ sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n");
+ return -EBUSY;
+ }
+
+ api_cmd_ctrl_init(chain);
+ api_cmd_set_num_cells(chain);
+ api_cmd_head_init(chain);
+
+ return wait_for_ready_chain(chain);
+}
+
+/**
+ * alloc_cmd_buf - allocate a dma buffer for API CMD command
+ * @chain: the API CMD specific chain for the cmd
+ * @cell: the cell in the HW for the cmd
+ * @cell_idx: the index of the cell
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell, u32 cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ void *dev = chain->hwdev->dev_hdl;
+ void *buf_vaddr;
+ u64 buf_paddr;
+ int err = 0;
+
+ buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base +
+ chain->buf_size_align * cell_idx);
+ buf_paddr = chain->buf_paddr_base +
+ chain->buf_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->api_cmd_vaddr = buf_vaddr;
+
+ /* set the cmd DMA address in the cell */
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_POLL_READ:
+ cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+ break;
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ /* The data in the HW should be in Big Endian Format */
+ cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+ break;
+ default:
+ sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+ chain->chain_type);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void alloc_resp_buf(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell, u32 cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ void *resp_vaddr;
+ u64 resp_paddr;
+
+ resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base +
+ chain->rsp_size_align * cell_idx);
+ resp_paddr = chain->rsp_paddr_base +
+ chain->rsp_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->resp = resp_vaddr;
+ cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr);
+}
+
+static int hinic_alloc_api_cmd_cell_buf(struct hinic_api_cmd_chain *chain,
+ u32 cell_idx,
+ struct hinic_api_cmd_cell *node)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ int err;
+
+ /* For read chain, we should allocate buffer for the response data */
+ if (chain->chain_type == HINIC_API_CMD_MULTI_READ ||
+ chain->chain_type == HINIC_API_CMD_POLL_READ)
+ alloc_resp_buf(chain, node, cell_idx);
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_POLL_READ:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ err = alloc_cmd_buf(chain, node, cell_idx);
+ if (err) {
+ sdk_err(dev, "Failed to allocate cmd buffer\n");
+ goto alloc_cmd_buf_err;
+ }
+ break;
+ /* For api command write and api command read, the data section
+ * is directly inserted in the cell, so no need to allocate.
+ */
+ case HINIC_API_CMD_MULTI_READ:
+ chain->cell_ctxt[cell_idx].api_cmd_vaddr =
+ &node->read.hw_cmd_paddr;
+ break;
+ default:
+ sdk_err(dev, "Unsupported API CMD chain type\n");
+ err = -EINVAL;
+ goto alloc_cmd_buf_err;
+ }
+
+ return 0;
+
+alloc_cmd_buf_err:
+
+ return err;
+}
+
+/**
+ * api_cmd_create_cell - create API CMD cell of specific chain
+ * @chain: the API CMD specific chain to create its cell
+ * @cell_idx: the cell index to create
+ * @pre_node: previous cell
+ * @node_vaddr: the virt addr of the cell
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx,
+ struct hinic_api_cmd_cell *pre_node,
+ struct hinic_api_cmd_cell **node_vaddr)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_api_cmd_cell *node;
+ void *cell_vaddr;
+ u64 cell_paddr;
+ int err;
+
+ cell_vaddr = (void *)((u64)chain->cell_vaddr_base +
+ chain->cell_size_align * cell_idx);
+ cell_paddr = chain->cell_paddr_base +
+ chain->cell_size_align * cell_idx;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+ cell_ctxt->cell_vaddr = cell_vaddr;
+ node = cell_ctxt->cell_vaddr;
+
+ if (!pre_node) {
+ chain->head_node = cell_vaddr;
+ chain->head_cell_paddr = cell_paddr;
+ } else {
+ /* The data in the HW should be in Big Endian Format */
+ pre_node->next_cell_paddr = cpu_to_be64(cell_paddr);
+ }
+
+ /* Driver software should make sure that there is an empty API
+ * command cell at the end of the chain
+ */
+ node->next_cell_paddr = 0;
+
+ err = hinic_alloc_api_cmd_cell_buf(chain, cell_idx, node);
+ if (err)
+ return err;
+
+ *node_vaddr = node;
+
+ return 0;
+}
+
+/**
+ * api_cmd_create_cells - create API CMD cells for specific chain
+ * @chain: the API CMD specific chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
+ void *dev = chain->hwdev->dev_hdl;
+ u32 cell_idx;
+ int err;
+
+ for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
+ err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
+ if (err) {
+ sdk_err(dev, "Failed to create API CMD cell\n");
+ return err;
+ }
+
+ pre_node = node;
+ }
+
+ if (!node)
+ return -EFAULT;
+
+ /* set the Final node to point on the start */
+ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
+
+ /* set the current node to be the head */
+ chain->curr_node = chain->head_node;
+ return 0;
+}
+
+/**
+ * api_chain_init - initialize API CMD specific chain
+ * @chain: the API CMD specific chain to initialize
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_chain_init(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_chain_attr *attr)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ size_t cell_ctxt_size;
+ size_t cells_buf_size;
+ int err;
+
+ chain->chain_type = attr->chain_type;
+ chain->num_cells = attr->num_cells;
+ chain->cell_size = attr->cell_size;
+ chain->rsp_size = attr->rsp_size;
+
+ chain->prod_idx = 0;
+ chain->cons_idx = 0;
+
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock_init(&chain->async_lock);
+ else
+ sema_init(&chain->sem, 1);
+
+ cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
+ if (!cell_ctxt_size) {
+ sdk_err(dev, "Api chain cell size cannot be zero\n");
+ err = -EINVAL;
+ goto alloc_cell_ctxt_err;
+ }
+
+ chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL);
+ if (!chain->cell_ctxt) {
+ sdk_err(dev, "Failed to allocate cell contexts for a chain\n");
+ err = -ENOMEM;
+ goto alloc_cell_ctxt_err;
+ }
+
+ chain->wb_status = dma_zalloc_coherent(dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
+ if (!chain->wb_status) {
+ sdk_err(dev, "Failed to allocate DMA wb status\n");
+ err = -ENOMEM;
+ goto alloc_wb_status_err;
+ }
+
+ chain->cell_size_align = ALIGN((u64)chain->cell_size,
+ API_CMD_NODE_ALIGN_SIZE);
+ chain->rsp_size_align = ALIGN((u64)chain->rsp_size,
+ API_CHAIN_RESP_ALIGNMENT);
+ chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE);
+
+ cells_buf_size = (chain->cell_size_align + chain->rsp_size_align +
+ chain->buf_size_align) * chain->num_cells;
+
+ err = hinic_dma_zalloc_coherent_align(dev, cells_buf_size,
+ API_CMD_NODE_ALIGN_SIZE,
+ GFP_KERNEL,
+ &chain->cells_addr);
+ if (err) {
+ sdk_err(dev, "Failed to allocate API CMD cells buffer\n");
+ goto alloc_cells_buf_err;
+ }
+
+ chain->cell_vaddr_base = chain->cells_addr.align_vaddr;
+ chain->cell_paddr_base = chain->cells_addr.align_paddr;
+
+ chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base +
+ chain->cell_size_align * chain->num_cells);
+ chain->rsp_paddr_base = chain->cell_paddr_base +
+ chain->cell_size_align * chain->num_cells;
+
+ chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base +
+ chain->rsp_size_align * chain->num_cells);
+ chain->buf_paddr_base = chain->rsp_paddr_base +
+ chain->rsp_size_align * chain->num_cells;
+
+ return 0;
+
+alloc_cells_buf_err:
+ dma_free_coherent(dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+
+alloc_wb_status_err:
+ kfree(chain->cell_ctxt);
+
+/*lint -save -e548*/
+alloc_cell_ctxt_err:
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock_deinit(&chain->async_lock);
+ else
+ sema_deinit(&chain->sem);
+/*lint -restore*/
+ return err;
+}
+
+/**
+ * api_chain_free - free API CMD specific chain
+ * @chain: the API CMD specific chain to free
+ **/
+static void api_chain_free(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+
+ hinic_dma_free_coherent_align(dev, &chain->cells_addr);
+
+ dma_free_coherent(dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+ kfree(chain->cell_ctxt);
+
+/*lint -save -e548*/
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock_deinit(&chain->async_lock);
+ else
+ sema_deinit(&chain->sem);
+/*lint -restore*/
+}
+
+/**
+ * api_cmd_create_chain - create API CMD specific chain
+ * @cmd_chain: output pointer for the created API CMD chain
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ **/
+static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain,
+ struct hinic_api_cmd_chain_attr *attr)
+{
+ struct hinic_hwdev *hwdev = attr->hwdev;
+ struct hinic_api_cmd_chain *chain;
+ int err;
+
+ if (attr->num_cells & (attr->num_cells - 1)) {
+ sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n");
+ return -EINVAL;
+ }
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return -ENOMEM;
+
+ chain->hwdev = hwdev;
+
+ err = api_chain_init(chain, attr);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n");
+ goto chain_init_err;
+ }
+
+ err = api_cmd_create_cells(chain);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n");
+ goto create_cells_err;
+ }
+
+ err = api_cmd_chain_hw_init(chain);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n");
+ goto chain_hw_init_err;
+ }
+
+ *cmd_chain = chain;
+ return 0;
+
+chain_hw_init_err:
+create_cells_err:
+ api_chain_free(chain);
+
+chain_init_err:
+ kfree(chain);
+ return err;
+}
+
+/**
+ * api_cmd_destroy_chain - destroy API CMD specific chain
+ * @chain: the API CMD specific chain to destroy
+ **/
+static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
+{
+ api_chain_free(chain);
+ kfree(chain);
+}
+
+/**
+ * hinic_api_cmd_init - Initialize all the API CMD chains
+ * @hwdev: the HW device that the chains belong to
+ * @chain: the API CMD chains that will be initialized
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+ struct hinic_api_cmd_chain **chain)
+{
+ void *dev = hwdev->dev_hdl;
+ struct hinic_api_cmd_chain_attr attr;
+ enum hinic_api_cmd_chain_type chain_type, i;
+ int err;
+
+ attr.hwdev = hwdev;
+ attr.num_cells = API_CHAIN_NUM_CELLS;
+ attr.cell_size = API_CHAIN_CELL_SIZE;
+ attr.rsp_size = API_CHAIN_RSP_DATA_SIZE;
+
+ chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+ for (; chain_type < HINIC_API_CMD_MAX; chain_type++) {
+ attr.chain_type = chain_type;
+
+ err = api_cmd_create_chain(&chain[chain_type], &attr);
+ if (err) {
+ sdk_err(dev, "Failed to create chain %d\n", chain_type);
+ goto create_chain_err;
+ }
+ }
+
+ return 0;
+
+create_chain_err:
+ i = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+ for (; i < chain_type; i++)
+ api_cmd_destroy_chain(chain[i]);
+
+ return err;
+}
+
+/**
+ * hinic_api_cmd_free - free the API CMD chains
+ * @chain: the API CMD chains that will be freed
+ **/
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
+{
+ enum hinic_api_cmd_chain_type chain_type;
+
+ chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+
+ for (; chain_type < HINIC_API_CMD_MAX; chain_type++)
+ api_cmd_destroy_chain(chain[chain_type]);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
new file mode 100644
index 000000000000..06060b0ae480
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_API_CMD_H_
+#define HINIC_API_CMD_H_
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
+
+#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
+#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
+#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8
+#define HINIC_API_CMD_DESC_DEST_SHIFT 32
+#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U
+#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU
+#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
+#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_DESC_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
+ HINIC_API_CMD_DESC_##member##_SHIFT)
+
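+/* Illustrative example (not part of the code): the SIZE field holds the
+ * command length in 4-byte units, so a 16-byte command to node 5 would be
+ * encoded as:
+ * desc |= HINIC_API_CMD_DESC_SET(5, DEST) |
+ *	   HINIC_API_CMD_DESC_SET(4, SIZE);
+ */
+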
+#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
+
+#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
+
+#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
+ (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
+ HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
+
+#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
+
+#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
+#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_RESP_HEADER_VALID(val) \
+ (((val) & HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_VALID_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8
+#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU
+
+#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1
+#define HINIC_API_CMD_RESP_HEAD_ERR(val) \
+ ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_ERR_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF
+
+#define HINIC_API_CMD_RESP_RESERVED 3
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK)
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \
+ (u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)
+
+#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
+#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
+#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+
+#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
+#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
+
+#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEAD_VALID_MASK)
+
+#define HINIC_API_CMD_STATUS_CONS_IDX(val) \
+ ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK)
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \
+ (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \
+ HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK)
+
+#define HINIC_API_CMD_STATUS_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_##member##_MASK)
+
+enum hinic_api_cmd_chain_type {
+ /* write command with completion notification */
+ HINIC_API_CMD_WRITE = 0,
+ /* read command with completion notification */
+ HINIC_API_CMD_READ = 1,
+ /* write to mgmt cpu command with completion */
+ HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
+ /* multi read command with completion notification - not used */
+ HINIC_API_CMD_MULTI_READ = 3,
+ /* write command without completion notification */
+ HINIC_API_CMD_POLL_WRITE = 4,
+ /* read command without completion notification */
+ HINIC_API_CMD_POLL_READ = 5,
+ /* write to mgmt cpu command asynchronously - no completion wait */
+ HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
+ HINIC_API_CMD_MAX,
+};
+
+struct hinic_api_cmd_status {
+ u64 header;
+ u32 buf_desc;
+ u32 cell_addr_hi;
+ u32 cell_addr_lo;
+ u32 rsvd0;
+ u64 rsvd1;
+};
+
+/* HW struct */
+struct hinic_api_cmd_cell {
+ u64 ctrl;
+
+ /* address is 64 bit in HW struct */
+ u64 next_cell_paddr;
+
+ u64 desc;
+
+ /* HW struct */
+ union {
+ struct {
+ u64 hw_cmd_paddr;
+ } write;
+
+ struct {
+ u64 hw_wb_resp_paddr;
+ u64 hw_cmd_paddr;
+ } read;
+ };
+};
+
+struct hinic_api_cmd_resp_fmt {
+ u64 header;
+ u64 rsvd[3];
+ u64 resp_data;
+};
+
+struct hinic_api_cmd_cell_ctxt {
+ struct hinic_api_cmd_cell *cell_vaddr;
+
+ void *api_cmd_vaddr;
+
+ struct hinic_api_cmd_resp_fmt *resp;
+
+ struct completion done;
+ int status;
+
+ u32 saved_prod_idx;
+};
+
+struct hinic_api_cmd_chain_attr {
+ struct hinic_hwdev *hwdev;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 rsp_size;
+ u16 cell_size;
+};
+
+struct hinic_api_cmd_chain {
+ struct hinic_hwdev *hwdev;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 cell_size;
+ u16 rsp_size;
+
+ /* HW members are in 24-bit format */
+ u32 prod_idx;
+ u32 cons_idx;
+
+ struct semaphore sem;
+ /* async cmds cannot sleep, so the async chain uses a spinlock */
+ spinlock_t async_lock;
+
+ dma_addr_t wb_status_paddr;
+ struct hinic_api_cmd_status *wb_status;
+
+ dma_addr_t head_cell_paddr;
+ struct hinic_api_cmd_cell *head_node;
+
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_api_cmd_cell *curr_node;
+
+ struct hinic_dma_addr_align cells_addr;
+
+ u8 *cell_vaddr_base;
+ u64 cell_paddr_base;
+ u8 *rsp_vaddr_base;
+ u64 rsp_paddr_base;
+ u8 *buf_vaddr_base;
+ u64 buf_paddr_base;
+ u64 cell_size_align;
+ u64 rsp_size_align;
+ u64 buf_size_align;
+};
+
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size);
+
+int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size,
+ void *ack, u16 ack_size);
+
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+ struct hinic_api_cmd_chain **chain);
+
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
new file mode 100644
index 000000000000..60a2faf30d55
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
@@ -0,0 +1,2311 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_mbox.h"
+#include "hinic_cfg.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+uint intr_mode;
+
+uint timer_enable = 1;
+uint bloomfilter_enable = 1;
+uint g_test_qpc_num;
+uint g_test_qpc_resvd_num;
+uint g_test_pagesize_reorder;
+uint g_test_xid_alloc_mode = 1;
+uint g_test_gpa_check_enable = 1;
+uint g_test_qpc_alloc_mode = 2;
+uint g_test_scqc_alloc_mode = 2;
+uint g_test_max_conn;
+uint g_test_max_cache_conn;
+uint g_test_scqc_num;
+uint g_test_mpt_num;
+uint g_test_mpt_resvd;
+uint g_test_scq_resvd;
+uint g_test_hash_num;
+uint g_test_reorder_num;
+
+static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt)
+{
+ cfg_mgmt->svc_cap.timer_en = (u8)timer_enable;
+ cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable;
+ cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num;
+ cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num;
+ cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder;
+ cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode;
+ cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable;
+ cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode;
+ cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode;
+ cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn;
+ cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn;
+ cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num;
+ cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num;
+ cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd;
+ cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd;
+ cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num;
+ cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num;
+}
+
+int hinic_sync_time(void *hwdev, u64 time)
+{
+ struct hinic_sync_time_info time_info = {0};
+ u16 out_size = sizeof(time_info);
+ int err;
+
+ time_info.mstime = time;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SYNC_TIME, &time_info,
+ sizeof(time_info), &time_info, &out_size,
+ 0);
+ if (err || time_info.status || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, time_info.status, out_size);
+ }
+
+ return err;
+}
+
+static void parse_sf_en_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap, enum func_type type)
+{
+ struct dev_sf_svc_attr *attr = &cap->sf_svc_attr;
+
+ if (type == TYPE_PPF) {
+ /* For PPF's SF EN flag, we assign it in get_dynamic_res_cap().
+ * Here we only save its VF's flag.
+ */
+ attr->sf_en_vf = dev_cap->sf_en_vf;
+ } else if (type == TYPE_PF) {
+ if (dev_cap->sf_en_pf)
+ cap->sf_en = true;
+ else
+ cap->sf_en = false;
+
+ attr->sf_en_vf = dev_cap->sf_en_vf;
+ } else {
+ /* VF gets SF_EN_VF from PPF/PF */
+ if (dev_cap->sf_en_vf)
+ cap->sf_en = true;
+ else
+ cap->sf_en = false;
+
+ attr->sf_en_vf = 0;
+ }
+}
+
+static void parse_pub_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_sf_svc_attr *attr = &cap->sf_svc_attr;
+
+ cap->svc_type = dev_cap->svc_cap_en;
+ cap->chip_svc_type = cap->svc_type;
+
+ if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT)
+ attr->ft_en = true;
+ else
+ attr->ft_en = false;
+
+ if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT)
+ attr->rdma_en = true;
+ else
+ attr->rdma_en = false;
+
+ cap->host_id = dev_cap->host_id;
+ cap->ep_id = dev_cap->ep_id;
+
+ cap->max_cos_id = dev_cap->max_cos_id;
+ cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap;
+ cap->er_id = dev_cap->er_id;
+ cap->port_id = dev_cap->port_id;
+
+ parse_sf_en_cap(cap, dev_cap, type);
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ cap->max_vf = dev_cap->max_vf;
+ cap->pf_num = dev_cap->pf_num;
+ cap->pf_id_start = dev_cap->pf_id_start;
+ cap->vf_num = dev_cap->vf_num;
+ cap->vf_id_start = dev_cap->vf_id_start;
+
+ /* FC needs the max queue number, but that info is kept in the
+ * l2nic cap; it is duplicated in the public cap so FC can read
+ * the correct max queue number.
+ */
+ cap->max_sqs = dev_cap->nic_max_sq + 1;
+ cap->max_rqs = dev_cap->nic_max_rq + 1;
+ } else {
+ cap->max_vf = 0;
+ cap->max_sqs = dev_cap->nic_max_sq;
+ cap->max_rqs = dev_cap->nic_max_rq;
+ }
+
+ cap->host_total_function = dev_cap->host_total_func;
+ cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+ cap->max_connect_num = dev_cap->max_conn_num;
+ cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
+ cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr;
+ cap->bfilter_len = dev_cap->bfilter_len;
+ cap->hash_bucket_num = dev_cap->hash_bucket_num;
+ cap->dev_ver_info.cfg_file_ver = dev_cap->cfg_file_ver;
+ cap->net_port_mode = dev_cap->net_port_mode;
+
+ /* FC does not use VF */
+ if (cap->net_port_mode == CFG_NET_MODE_FC)
+ cap->max_vf = 0;
+
+ pr_info("Get public resource capbility, svc_cap_en: 0x%x\n",
+ dev_cap->svc_cap_en);
+ pr_info("Host_id=0x%x, ep_id=0x%x, max_cos_id=0x%x, cos_bitmap=0x%x, er_id=0x%x,
port_id=0x%x\n",
+ cap->host_id, cap->ep_id,
+ cap->max_cos_id, cap->cos_valid_bitmap,
+ cap->er_id, cap->port_id);
+ pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x, net_port_mode=0x%x,
max_vf=0x%x\n",
+ cap->host_total_function, cap->host_oq_id_mask_val,
+ cap->net_port_mode, cap->max_vf);
+
+ pr_info("Pf_num=0x%x, pf_id_start=0x%x, vf_num=0x%x, vf_id_start=0x%x\n",
+ cap->pf_num, cap->pf_id_start,
+ cap->vf_num, cap->vf_id_start);
+
+ /* Check parameters from firmware */
+ if (cap->max_sqs > HINIC_CFG_MAX_QP ||
+ cap->max_rqs > HINIC_CFG_MAX_QP) {
+ pr_info("Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n",
+ HINIC_CFG_MAX_QP, cap->max_sqs, cap->max_rqs);
+ cap->max_sqs = HINIC_CFG_MAX_QP;
+ cap->max_rqs = HINIC_CFG_MAX_QP;
+ }
+}
+
+static void parse_dynamic_share_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap;
+
+ shared_cap->host_pctxs = dev_cap->host_pctx_num;
+
+ if (dev_cap->host_sf_en)
+ cap->sf_en = true;
+ else
+ cap->sf_en = false;
+
+ shared_cap->host_cctxs = dev_cap->host_ccxt_num;
+ shared_cap->host_scqs = dev_cap->host_scq_num;
+ shared_cap->host_srqs = dev_cap->host_srq_num;
+ shared_cap->host_mpts = dev_cap->host_mpt_num;
+
+ pr_info("Dynamic share resource capbility, host_pctxs=0x%x, host_cctxs=0x%x,
host_scqs=0x%x, host_srqs=0x%x, host_mpts=0x%x\n",
+ shared_cap->host_pctxs,
+ shared_cap->host_cctxs,
+ shared_cap->host_scqs,
+ shared_cap->host_srqs,
+ shared_cap->host_mpts);
+}
+
+static void parse_l2nic_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct nic_service_cap *nic_cap = &cap->nic_cap;
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ nic_cap->max_sqs = dev_cap->nic_max_sq + 1;
+ nic_cap->max_rqs = dev_cap->nic_max_rq + 1;
+ nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1;
+ nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1;
+ } else {
+ nic_cap->max_sqs = dev_cap->nic_max_sq;
+ nic_cap->max_rqs = dev_cap->nic_max_rq;
+ nic_cap->vf_max_sqs = 0;
+ nic_cap->vf_max_rqs = 0;
+ }
+
+ if (dev_cap->nic_lro_en)
+ nic_cap->lro_en = true;
+ else
+ nic_cap->lro_en = false;
+
+ nic_cap->lro_sz = dev_cap->nic_lro_sz;
+ nic_cap->tso_sz = dev_cap->nic_tso_sz;
+
+ pr_info("L2nic resource capbility, max_sqs=0x%x, max_rqs=0x%x, vf_max_sqs=0x%x,
vf_max_rqs=0x%x\n",
+ nic_cap->max_sqs,
+ nic_cap->max_rqs,
+ nic_cap->vf_max_sqs,
+ nic_cap->vf_max_rqs);
+
+ /* Check parameters from firmware */
+ if (nic_cap->max_sqs > HINIC_CFG_MAX_QP ||
+ nic_cap->max_rqs > HINIC_CFG_MAX_QP) {
+ pr_info("Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n",
+ HINIC_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs);
+ nic_cap->max_sqs = HINIC_CFG_MAX_QP;
+ nic_cap->max_rqs = HINIC_CFG_MAX_QP;
+ }
+}
+
+static void parse_roce_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_roce_svc_own_cap *roce_cap =
+ &cap->rdma_cap.dev_rdma_cap.roce_own_cap;
+
+ roce_cap->max_qps = dev_cap->roce_max_qp;
+ roce_cap->max_cqs = dev_cap->roce_max_cq;
+ roce_cap->max_srqs = dev_cap->roce_max_srq;
+ roce_cap->max_mpts = dev_cap->roce_max_mpt;
+ roce_cap->num_cos = dev_cap->max_cos_id + 1;
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ roce_cap->vf_max_qps = dev_cap->roce_vf_max_qp;
+ roce_cap->vf_max_cqs = dev_cap->roce_vf_max_cq;
+ roce_cap->vf_max_srqs = dev_cap->roce_vf_max_srq;
+ roce_cap->vf_max_mpts = dev_cap->roce_vf_max_mpt;
+ } else {
+ roce_cap->vf_max_qps = 0;
+ roce_cap->vf_max_cqs = 0;
+ roce_cap->vf_max_srqs = 0;
+ roce_cap->vf_max_mpts = 0;
+ }
+
+ roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start;
+ roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end;
+ roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size;
+
+ roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start;
+ roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end;
+ roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size;
+
+ roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start;
+ roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end;
+ roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size;
+
+ pr_info("Get roce resource capbility\n");
+ pr_info("Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_mpts=0x%x\n",
+ roce_cap->max_qps, roce_cap->max_cqs,
+ roce_cap->max_srqs, roce_cap->max_mpts);
+
+ pr_info("Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_srqs= 0x%x, vf_max_mpts=
0x%x\n",
+ roce_cap->vf_max_qps, roce_cap->vf_max_cqs,
+ roce_cap->vf_max_srqs, roce_cap->vf_max_mpts);
+
+ pr_info("Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+ roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end,
+ roce_cap->cmtt_cl_sz);
+
+ pr_info("Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+ roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end,
+ roce_cap->dmtt_cl_sz);
+
+ pr_info("Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+ roce_cap->wqe_cl_start, roce_cap->wqe_cl_end,
+ roce_cap->wqe_cl_sz);
+
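+ /* fall back to built-in defaults when the firmware reports no RoCE QPs */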
+ if (roce_cap->max_qps == 0) {
+ roce_cap->max_qps = 1024;
+ roce_cap->max_cqs = 2048;
+ roce_cap->max_srqs = 1024;
+ roce_cap->max_mpts = 1024;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ roce_cap->vf_max_qps = 512;
+ roce_cap->vf_max_cqs = 1024;
+ roce_cap->vf_max_srqs = 512;
+ roce_cap->vf_max_mpts = 512;
+ }
+ }
+}
+
+static void parse_iwarp_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+
+{
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap;
+
+ iwarp_cap->max_qps = dev_cap->iwarp_max_qp;
+ iwarp_cap->max_cqs = dev_cap->iwarp_max_cq;
+ iwarp_cap->max_mpts = dev_cap->iwarp_max_mpt;
+ iwarp_cap->num_cos = dev_cap->max_cos_id + 1;
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ iwarp_cap->vf_max_qps = dev_cap->iwarp_vf_max_qp;
+ iwarp_cap->vf_max_cqs = dev_cap->iwarp_vf_max_cq;
+ iwarp_cap->vf_max_mpts = dev_cap->iwarp_vf_max_mpt;
+ } else {
+ iwarp_cap->vf_max_qps = 0;
+ iwarp_cap->vf_max_cqs = 0;
+ iwarp_cap->vf_max_mpts = 0;
+ }
+
+ iwarp_cap->cmtt_cl_start = dev_cap->iwarp_cmtt_cl_start;
+ iwarp_cap->cmtt_cl_end = dev_cap->iwarp_cmtt_cl_end;
+ iwarp_cap->cmtt_cl_sz = dev_cap->iwarp_cmtt_cl_size;
+
+ iwarp_cap->dmtt_cl_start = dev_cap->iwarp_dmtt_cl_start;
+ iwarp_cap->dmtt_cl_end = dev_cap->iwarp_dmtt_cl_end;
+ iwarp_cap->dmtt_cl_sz = dev_cap->iwarp_dmtt_cl_size;
+
+ iwarp_cap->wqe_cl_start = dev_cap->iwarp_wqe_cl_start;
+ iwarp_cap->wqe_cl_end = dev_cap->iwarp_wqe_cl_end;
+ iwarp_cap->wqe_cl_sz = dev_cap->iwarp_wqe_cl_size;
+
+ pr_info("Get iwrap resource capbility\n");
+ pr_info("Max_qps=0x%x, max_cqs=0x%x, max_mpts=0x%x\n",
+ iwarp_cap->max_qps, iwarp_cap->max_cqs,
+ iwarp_cap->max_mpts);
+ pr_info("Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_mpts=0x%x\n",
+ iwarp_cap->vf_max_qps, iwarp_cap->vf_max_cqs,
+ iwarp_cap->vf_max_mpts);
+
+ pr_info("Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+ iwarp_cap->cmtt_cl_start, iwarp_cap->cmtt_cl_end,
+ iwarp_cap->cmtt_cl_sz);
+
+ pr_info("Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+ iwarp_cap->dmtt_cl_start, iwarp_cap->dmtt_cl_end,
+ iwarp_cap->dmtt_cl_sz);
+
+ pr_info("Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+ iwarp_cap->wqe_cl_start, iwarp_cap->wqe_cl_end,
+ iwarp_cap->wqe_cl_sz);
+
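+ /* same fallback when the firmware reports no iwarp QPs */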
+ if (iwarp_cap->max_qps == 0) {
+ iwarp_cap->max_qps = 8;
+ iwarp_cap->max_cqs = 16;
+ iwarp_cap->max_mpts = 8;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ iwarp_cap->vf_max_qps = 8;
+ iwarp_cap->vf_max_cqs = 16;
+ iwarp_cap->vf_max_mpts = 8;
+ }
+ }
+}
+
+static void parse_fcoe_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_fcoe_svc_cap *fcoe_cap = &cap->fcoe_cap.dev_fcoe_cap;
+
+ fcoe_cap->max_qps = dev_cap->fcoe_max_qp;
+ fcoe_cap->max_cqs = dev_cap->fcoe_max_cq;
+ fcoe_cap->max_srqs = dev_cap->fcoe_max_srq;
+ fcoe_cap->max_cctxs = dev_cap->fcoe_max_cctx;
+ fcoe_cap->cctxs_id_start = dev_cap->fcoe_cctx_id_start;
+ fcoe_cap->vp_id_start = dev_cap->fcoe_vp_id_start;
+ fcoe_cap->vp_id_end = dev_cap->fcoe_vp_id_end;
+
+ pr_info("Get fcoe resource capbility\n");
+ pr_info("Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_cctxs=0x%x,
cctxs_id_start=0x%x\n",
+ fcoe_cap->max_qps, fcoe_cap->max_cqs, fcoe_cap->max_srqs,
+ fcoe_cap->max_cctxs, fcoe_cap->cctxs_id_start);
+ pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
+ fcoe_cap->vp_id_start, fcoe_cap->vp_id_end);
+}
+
+static void parse_toe_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap;
+
+ toe_cap->max_pctxs = dev_cap->toe_max_pctx;
+ toe_cap->max_cqs = dev_cap->toe_max_cq;
+ toe_cap->max_srqs = dev_cap->toe_max_srq;
+ toe_cap->srq_id_start = dev_cap->toe_srq_id_start;
+ toe_cap->num_cos = dev_cap->max_cos_id + 1;
+
+ pr_info("Get toe resource capbility, max_pctxs=0x%x, max_cqs=0x%x, max_srqs=0x%x,
srq_id_start=0x%x\n",
+ toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs,
+ toe_cap->srq_id_start);
+}
+
+static void parse_fc_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap;
+
+ fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx;
+ fc_cap->scq_num = dev_cap->fc_max_scq;
+ fc_cap->srq_num = dev_cap->fc_max_srq;
+ fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx;
+ fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start;
+ fc_cap->vp_id_start = dev_cap->fc_vp_id_start;
+ fc_cap->vp_id_end = dev_cap->fc_vp_id_end;
+
+ pr_info("Get fc resource capbility\n");
+ pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x,
max_child_qpc_num=0x%x, child_qpc_id_start=0x%x\n",
+ fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num,
+ fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start);
+ pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
+ fc_cap->vp_id_start, fc_cap->vp_id_end);
+}
+
+static void parse_ovs_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct ovs_service_cap *ovs_cap = &cap->ovs_cap;
+
+ ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc;
+ ovs_cap->dev_ovs_cap.max_cqs = 0;
+
+ pr_info("Get ovs resource capbility, max_qpc: 0x%x\n",
+ ovs_cap->dev_ovs_cap.max_pctxs);
+}
+
+static void parse_acl_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct acl_service_cap *acl_cap = &cap->acl_cap;
+
+ acl_cap->dev_acl_cap.max_pctxs = 1024 * 1024;
+ acl_cap->dev_acl_cap.max_cqs = 8;
+}
+
+static void parse_dev_cap(struct hinic_hwdev *dev,
+ struct hinic_dev_cap *dev_cap, enum func_type type)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+ /* Public resource */
+ parse_pub_res_cap(cap, dev_cap, type);
+
+ /* PPF managed dynamic resource */
+ if (type == TYPE_PPF)
+ parse_dynamic_share_res_cap(cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (IS_NIC_TYPE(dev))
+ parse_l2nic_res_cap(cap, dev_cap, type);
+
+ /* RoCE resource */
+ if (IS_ROCE_TYPE(dev))
+ parse_roce_res_cap(cap, dev_cap, type);
+
+ /* iWARP resource */
+ if (IS_IWARP_TYPE(dev))
+ parse_iwarp_res_cap(cap, dev_cap, type);
+
+ /* FCoE/IOE/TOE/FC without virtualization */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ if (IS_FCOE_TYPE(dev))
+ parse_fcoe_res_cap(cap, dev_cap, type);
+
+ if (IS_TOE_TYPE(dev))
+ parse_toe_res_cap(cap, dev_cap, type);
+
+ if (IS_FC_TYPE(dev))
+ parse_fc_res_cap(cap, dev_cap, type);
+ }
+
+ if (IS_OVS_TYPE(dev))
+ parse_ovs_res_cap(cap, dev_cap, type);
+
+ if (IS_ACL_TYPE(dev))
+ parse_acl_res_cap(cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type)
+{
+ struct hinic_dev_cap dev_cap = {0};
+ u16 out_len = sizeof(dev_cap);
+ int err;
+
+ dev_cap.version = HINIC_CMD_VER_FUNC_ID;
+ dev_cap.func_id = hinic_global_func_id(dev);
+
+ sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n",
+ dev_cap.func_id);
+
+ err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP,
+ &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, 0);
+ if (err || dev_cap.status || !out_len) {
+ sdk_err(dev->dev_hdl,
+ "Failed to get capability from FW, err: %d, status: 0x%x, out size:
0x%x\n",
+ err, dev_cap.status, out_len);
+ return -EFAULT;
+ }
+
+ parse_dev_cap(dev, &dev_cap, type);
+ return 0;
+}
+
+static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type)
+{
+ int err;
+ u16 in_len, out_len;
+ struct hinic_dev_cap dev_cap = {0};
+
+ in_len = sizeof(dev_cap);
+ out_len = in_len;
+
+ err = hinic_mbox_to_pf(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP,
+ &dev_cap, in_len, &dev_cap, &out_len,
+ CFG_MAX_CMD_TIMEOUT);
+ if (err || dev_cap.status || !out_len) {
+ sdk_err(dev->dev_hdl, "Failed to get capability from PF, err: %d, status:
0x%x, out size: 0x%x\n",
+ err, dev_cap.status, out_len);
+ return err;
+ }
+
+ parse_dev_cap(dev, &dev_cap, type);
+ return 0;
+}
+
+static int get_dev_cap(struct hinic_hwdev *dev)
+{
+ int err;
+ enum func_type type = HINIC_FUNC_TYPE(dev);
+
+ switch (type) {
+ case TYPE_PF:
+ case TYPE_PPF:
+ err = get_cap_from_fw(dev, type);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n");
+ return err;
+ }
+ break;
+ case TYPE_VF:
+ err = get_cap_from_pf(dev, type);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to get VF capability\n");
+ return err;
+ }
+ break;
+ default:
+ sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void nic_param_fix(struct hinic_hwdev *dev)
+{
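+ /* no NIC-specific parameter fixups are required at present */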
+}
+
+static void rdma_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct rdma_service_cap *rdma_cap = &cap->rdma_cap;
+ struct dev_roce_svc_own_cap *roce_cap =
+ &rdma_cap->dev_rdma_cap.roce_own_cap;
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &rdma_cap->dev_rdma_cap.iwarp_own_cap;
+
+ rdma_cap->log_mtt = LOG_MTT_SEG;
+ rdma_cap->log_rdmarc = LOG_RDMARC_SEG;
+ rdma_cap->reserved_qps = RDMA_RSVD_QPS;
+ rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE;
+
+ /* RoCE */
+ if (IS_ROCE_TYPE(dev)) {
+ roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ;
+ roce_cap->max_wqes = ROCE_MAX_WQES;
+ roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE;
+ roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ;
+ roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ;
+ roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ;
+ roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA;
+ roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA;
+ roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES;
+ roce_cap->reserved_srqs = ROCE_RSVD_SRQS;
+ roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE;
+ roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ;
+ roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ;
+ } else {
+ iwarp_cap->qpc_entry_sz = IWARP_QPC_ENTRY_SZ;
+ iwarp_cap->max_wqes = IWARP_MAX_WQES;
+ iwarp_cap->max_rq_sg = IWARP_MAX_RQ_SGE;
+ iwarp_cap->max_sq_inline_data_sz = IWARP_MAX_SQ_INLINE_DATA_SZ;
+ iwarp_cap->max_rq_desc_sz = IWARP_MAX_RQ_DESC_SZ;
+ iwarp_cap->max_irq_depth = IWARP_MAX_IRQ_DEPTH;
+ iwarp_cap->irq_entry_size = IWARP_IRQ_ENTRY_SZ;
+ iwarp_cap->max_orq_depth = IWARP_MAX_ORQ_DEPTH;
+ iwarp_cap->orq_entry_size = IWARP_ORQ_ENTRY_SZ;
+ iwarp_cap->max_rtoq_depth = IWARP_MAX_RTOQ_DEPTH;
+ iwarp_cap->rtoq_entry_size = IWARP_RTOQ_ENTRY_SZ;
+ iwarp_cap->max_ackq_depth = IWARP_MAX_ACKQ_DEPTH;
+ iwarp_cap->ackq_entry_size = IWARP_ACKQ_ENTRY_SZ;
+ iwarp_cap->max_msg_sz = IWARP_MAX_MSG_SZ;
+ }
+
+ rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ;
+ rdma_cap->wqebb_size = WQEBB_SZ;
+ rdma_cap->max_cqes = RDMA_MAX_CQES;
+ rdma_cap->reserved_cqs = RDMA_RSVD_CQS;
+ rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ;
+ rdma_cap->cqe_size = RDMA_CQE_SZ;
+ rdma_cap->reserved_mrws = RDMA_RSVD_MRWS;
+ rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ;
+
+ /* 2^8 - 1
+ * +------+------------+----------+
+ * |  4B  |  1M (20b)  | Key (8b) |
+ * +------+------------+----------+
+ * key = 8-bit key + 24-bit index;
+ * the Lkey of an SGE now uses 2 bits (bit31 and bit30), so the key
+ * only has 10 bits; we use the original 8 bits directly for
+ * simplification.
+ */
+ rdma_cap->max_fmr_maps = 255;
+ rdma_cap->num_mtts = RDMA_NUM_MTTS;
+ rdma_cap->log_mtt_seg = LOG_MTT_SEG;
+ rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ;
+ rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG;
+ rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY;
+ rdma_cap->num_ports = RDMA_NUM_PORTS;
+ rdma_cap->db_page_size = DB_PAGE_SZ;
+ rdma_cap->direct_wqe_size = DWQE_SZ;
+ rdma_cap->num_pds = NUM_PD;
+ rdma_cap->reserved_pds = RSVD_PD;
+ rdma_cap->max_xrcds = MAX_XRCDS;
+ rdma_cap->reserved_xrcds = RSVD_XRCDS;
+ rdma_cap->max_gid_per_port = MAX_GID_PER_PORT;
+ rdma_cap->gid_entry_sz = GID_ENTRY_SZ;
+ rdma_cap->reserved_lkey = RSVD_LKEY;
+ /* one completion vector per configured CEQ */
+ rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq;
+ rdma_cap->page_size_cap = PAGE_SZ_CAP;
+ rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV |
+ RDMA_BMME_FLAG_REMOTE_INV |
+ RDMA_BMME_FLAG_FAST_REG_WR |
+ RDMA_DEV_CAP_FLAG_XRC |
+ RDMA_DEV_CAP_FLAG_MEM_WINDOW |
+ RDMA_BMME_FLAG_TYPE_2_WIN |
+ RDMA_BMME_FLAG_WIN_TYPE_2B |
+ RDMA_DEV_CAP_FLAG_ATOMIC);
+ rdma_cap->max_frpl_len = MAX_FRPL_LEN;
+ rdma_cap->max_pkeys = MAX_PKEYS;
+}
+
+static void fcoe_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct fcoe_service_cap *fcoe_cap = &cap->fcoe_cap;
+
+ fcoe_cap->qpc_basic_size = FCOE_PCTX_SZ;
+ fcoe_cap->childc_basic_size = FCOE_CCTX_SZ;
+ fcoe_cap->sqe_size = FCOE_SQE_SZ;
+
+ fcoe_cap->scqc_basic_size = FCOE_SCQC_SZ;
+ fcoe_cap->scqe_size = FCOE_SCQE_SZ;
+
+ fcoe_cap->srqc_size = FCOE_SRQC_SZ;
+ fcoe_cap->srqe_size = FCOE_SRQE_SZ;
+}
+
+static void toe_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct toe_service_cap *toe_cap = &cap->toe_cap;
+
+ toe_cap->pctx_sz = TOE_PCTX_SZ;
+ toe_cap->scqc_sz = TOE_CQC_SZ;
+}
+
+static void fc_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct fc_service_cap *fc_cap = &cap->fc_cap;
+
+ fc_cap->parent_qpc_size = FC_PCTX_SZ;
+ fc_cap->child_qpc_size = FC_CCTX_SZ;
+ fc_cap->sqe_size = FC_SQE_SZ;
+
+ fc_cap->scqc_size = FC_SCQC_SZ;
+ fc_cap->scqe_size = FC_SCQE_SZ;
+
+ fc_cap->srqc_size = FC_SRQC_SZ;
+ fc_cap->srqe_size = FC_SRQE_SZ;
+}
+
+static void ovs_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct ovs_service_cap *ovs_cap = &cap->ovs_cap;
+
+ ovs_cap->pctx_sz = OVS_PCTX_SZ;
+ ovs_cap->scqc_sz = OVS_SCQC_SZ;
+}
+
+static void acl_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct acl_service_cap *acl_cap = &cap->acl_cap;
+
+ acl_cap->pctx_sz = ACL_PCTX_SZ;
+ acl_cap->scqc_sz = ACL_SCQC_SZ;
+}
+
+static void init_service_param(struct hinic_hwdev *dev)
+{
+ if (IS_NIC_TYPE(dev))
+ nic_param_fix(dev);
+
+ if (IS_RDMA_TYPE(dev))
+ rdma_param_fix(dev);
+
+ if (IS_FCOE_TYPE(dev))
+ fcoe_param_fix(dev);
+
+ if (IS_TOE_TYPE(dev))
+ toe_param_fix(dev);
+
+ if (IS_FC_TYPE(dev))
+ fc_param_fix(dev);
+
+ if (IS_OVS_TYPE(dev))
+ ovs_param_fix(dev);
+
+ if (IS_ACL_TYPE(dev))
+ acl_param_fix(dev);
+}
+
+static void cfg_get_eq_num(struct hinic_hwdev *dev)
+{
+ struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info;
+
+ eq_info->num_ceq = dev->hwif->attr.num_ceqs;
+ eq_info->num_ceq_remain = eq_info->num_ceq;
+}
+
+static int cfg_init_eq(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ struct cfg_eq *eq;
+ u8 num_ceq, i = 0;
+
+ cfg_get_eq_num(dev);
+ num_ceq = cfg_mgmt->eq_info.num_ceq;
+
+ sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n",
+ cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
+
+ if (!num_ceq) {
+ sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+ eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_ceq; ++i) {
+ eq[i].eqn = i;
+ eq[i].free = CFG_FREE;
+ eq[i].type = SERVICE_T_MAX;
+ }
+
+ cfg_mgmt->eq_info.eq = eq;
+ mutex_init(&cfg_mgmt->eq_info.eq_mutex);
+
+ return 0;
+}
+
+int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+
+ if (!hwdev || !ver)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+
+ memcpy(ver, &cfg_mgmt->svc_cap.dev_ver_info, sizeof(*ver));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_dev_ver_info);
+
+int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq *eq;
+ int eqn = -EINVAL;
+
+ if (!hwdev || vector < 0)
+ return -EINVAL;
+
+ if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) {
+ sdk_err(dev->dev_hdl,
+ "Service type :%d, only RDMA service could get eqn by vector.\n",
+ type);
+ return -EINVAL;
+ }
+
+ cfg_mgmt = dev->cfg_mgmt;
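+ /* RDMA CEQs start at CFG_RDMA_CEQ_BASE; fold the vector into the valid CEQ range */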
+ vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE;
+
+ eq = cfg_mgmt->eq_info.eq;
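+ /* report an eqn only when that CEQ is actually owned by an RDMA service */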
+ if ((eq[vector].type == SERVICE_T_ROCE ||
+ eq[vector].type == SERVICE_T_IWARP) &&
+ eq[vector].free == CFG_BUSY)
+ eqn = eq[vector].eqn;
+
+ return eqn;
+}
+EXPORT_SYMBOL(hinic_vector_to_eqn);
+
+static int cfg_init_interrupt(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info;
+ u16 intr_num = dev->hwif->attr.num_irqs;
+
+ if (!intr_num) {
+ sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+ irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info),
+ GFP_KERNEL);
+ if (!irq_info->alloc_info)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_num;
+
+ /* Production requires that VFs support MSI-X only */
+ if (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+ cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX;
+ else
+ cfg_mgmt->svc_cap.interrupt_type = intr_mode;
+ mutex_init(&irq_info->irq_mutex);
+ return 0;
+}
+
+static int cfg_enable_interrupt(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw;
+
+ void *pcidev = dev->pcidev_hdl;
+ struct irq_alloc_info_st *irq_info;
+ struct msix_entry *entry;
+ u16 i = 0;
+ int actual_irq;
+
+ irq_info = cfg_mgmt->irq_param_info.alloc_info;
+
+ sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n",
+ cfg_mgmt->svc_cap.interrupt_type, nreq);
+
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+ if (!nreq) {
+ sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n");
+ return -EINVAL;
+ }
+ entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < nreq; i++)
+ entry[i].entry = i;
+
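+ /* request up to nreq MSI-X vectors, accepting as few as VECTOR_THRESHOLD */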
+ actual_irq = pci_enable_msix_range(pcidev, entry,
+ VECTOR_THRESHOLD, nreq);
+ if (actual_irq < 0) {
+ sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+
+ nreq = (u16)actual_irq;
+ cfg_mgmt->irq_param_info.num_total = nreq;
+ cfg_mgmt->irq_param_info.num_irq_remain = nreq;
+ sdk_info(dev->dev_hdl, "Request %d msix vector success.\n",
+ nreq);
+
+ for (i = 0; i < nreq; ++i) {
+ /* u16 index the driver uses to identify this entry */
+ irq_info[i].info.msix_entry_idx = entry[i].entry;
+ /* u32 interrupt vector the kernel allocated for this entry */
+ irq_info[i].info.irq_id = entry[i].vector;
+ irq_info[i].type = SERVICE_T_MAX;
+ irq_info[i].free = CFG_FREE;
+ }
+
+ kfree(entry);
+
+ break;
+
+ default:
+ sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n",
+ cfg_mgmt->svc_cap.interrupt_type);
+ break;
+ }
+
+ return 0;
+}
+
+int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 num,
+ struct irq_info *irq_info_array, u16 *act_num)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ u16 free_num_irq;
+ int i, j;
+
+ if (!hwdev || !irq_info_array || !act_num)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+ free_num_irq = irq_info->num_irq_remain;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ if (num > free_num_irq) {
+ if (free_num_irq == 0) {
+ sdk_err(dev->dev_hdl,
+ "no free irq resource in cfg mgmt.\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -ENOMEM;
+ }
+
+ sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n",
+ free_num_irq);
+ num = free_num_irq;
+ }
+
+ *act_num = 0;
+
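+ /* scan the allocation table and hand out the first free entries */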
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < max_num_irq; j++) {
+ if (alloc_info[j].free == CFG_FREE) {
+ if (irq_info->num_irq_remain == 0) {
+ sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -EINVAL;
+ }
+ alloc_info[j].type = type;
+ alloc_info[j].free = CFG_BUSY;
+
+ irq_info_array[i].msix_entry_idx =
+ alloc_info[j].info.msix_entry_idx;
+ irq_info_array[i].irq_id =
+ alloc_info[j].info.irq_id;
+ (*act_num)++;
+ irq_info->num_irq_remain--;
+
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&irq_info->irq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_irqs);
+
+void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ int i;
+
+ if (!hwdev)
+ return;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ for (i = 0; i < max_num_irq; i++) {
+ if (irq_id == alloc_info[i].info.irq_id &&
+ type == alloc_info[i].type) {
+ if (alloc_info[i].free == CFG_BUSY) {
+ alloc_info[i].free = CFG_FREE;
+ irq_info->num_irq_remain++;
+ if (irq_info->num_irq_remain > max_num_irq) {
+ sdk_err(dev->dev_hdl, "Find target,but over range\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return;
+ }
+ break;
+ }
+ }
+ }
+
+ if (i >= max_num_irq)
+ sdk_warn(dev->dev_hdl, "Irq %d don`t need to free\n", irq_id);
+
+ mutex_unlock(&irq_info->irq_mutex);
+}
+EXPORT_SYMBOL(hinic_free_irq);
+
+int hinic_vector_to_irq(void *hwdev, enum hinic_service_type type, int vector)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct irq_alloc_info_st *irq_info;
+ int irq = -EINVAL;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) {
+ sdk_err(dev->dev_hdl,
+ "Service type: %u, only RDMA service could get eqn by vector\n",
+ type);
+ return -EINVAL;
+ }
+
+ /* Current RDMA CEQs are 2 - 31; this may change in the future */
+ vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE);
+
+ irq_info = cfg_mgmt->irq_param_info.alloc_info;
+ if (irq_info[vector].type == SERVICE_T_ROCE ||
+ irq_info[vector].type == SERVICE_T_IWARP)
+ if (irq_info[vector].free == CFG_BUSY)
+ irq = (int)irq_info[vector].info.irq_id;
+
+ return irq;
+}
+EXPORT_SYMBOL(hinic_vector_to_irq);
+
+int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int num,
+ int *ceq_id_array, int *act_num)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq_info *eq;
+ int free_ceq;
+ int i, j;
+
+ if (!hwdev || !ceq_id_array || !act_num)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ eq = &cfg_mgmt->eq_info;
+ free_ceq = eq->num_ceq_remain;
+
+ mutex_lock(&eq->eq_mutex);
+
+ if (num > free_ceq) {
+ if (free_ceq <= 0) {
+ sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n");
+ mutex_unlock(&eq->eq_mutex);
+ return -ENOMEM;
+ }
+
+ sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n",
+ free_ceq);
+ }
+
+ *act_num = 0;
+
+ num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE);
+ for (i = 0; i < num; i++) {
+ if (eq->num_ceq_remain == 0) {
+ sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n",
+ *act_num, num);
+ mutex_unlock(&eq->eq_mutex);
+ return 0;
+ }
+
+ for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) {
+ if (eq->eq[j].free == CFG_FREE) {
+ eq->eq[j].type = type;
+ eq->eq[j].free = CFG_BUSY;
+ eq->num_ceq_remain--;
+ ceq_id_array[i] = eq->eq[j].eqn;
+ (*act_num)++;
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&eq->eq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_ceqs);
+
+void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq_info *eq;
+ u8 num_ceq;
+ u8 i = 0;
+
+ if (!hwdev)
+ return;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ eq = &cfg_mgmt->eq_info;
+ num_ceq = eq->num_ceq;
+
+ mutex_lock(&eq->eq_mutex);
+
+ for (i = 0; i < num_ceq; i++) {
+ if (ceq_id == eq->eq[i].eqn &&
+ type == cfg_mgmt->eq_info.eq[i].type) {
+ if (eq->eq[i].free == CFG_BUSY) {
+ eq->eq[i].free = CFG_FREE;
+ eq->num_ceq_remain++;
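+ /* clamp in case a double free pushes the count past the total */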
+ if (eq->num_ceq_remain > num_ceq)
+ eq->num_ceq_remain %= num_ceq;
+
+ mutex_unlock(&eq->eq_mutex);
+ return;
+ }
+ }
+ }
+
+ if (i >= num_ceq)
+ sdk_warn(dev->dev_hdl, "ceq %d don`t need to free.\n", ceq_id);
+
+ mutex_unlock(&eq->eq_mutex);
+}
+EXPORT_SYMBOL(hinic_free_ceq);
+
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct nic_service_cap *nic_cap = &cap->nic_cap;
+ struct dev_roce_svc_own_cap *roce_cap =
+ &cap->rdma_cap.dev_rdma_cap.roce_own_cap;
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap;
+ struct dev_ovs_svc_cap *ovs_cap = &cap->ovs_cap.dev_ovs_cap;
+ struct hinic_dev_cap dev_cap_tmp = {0};
+ u16 out_len = 0;
+ u16 func_id;
+ int err;
+
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
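+ /* answer the VF's capability query with the VF share of this PF's resources */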
+ if (cap->sf_svc_attr.ft_en)
+ dev_cap->sf_svc_attr |= SF_SVC_FT_BIT;
+ else
+ dev_cap->sf_svc_attr &= ~SF_SVC_FT_BIT;
+
+ if (cap->sf_svc_attr.rdma_en)
+ dev_cap->sf_svc_attr |= SF_SVC_RDMA_BIT;
+ else
+ dev_cap->sf_svc_attr &= ~SF_SVC_RDMA_BIT;
+
+ dev_cap->sf_en_vf = cap->sf_svc_attr.sf_en_vf;
+
+ dev_cap->host_id = cap->host_id;
+ dev_cap->ep_id = cap->ep_id;
+ dev_cap->intr_type = cap->interrupt_type;
+ dev_cap->max_cos_id = cap->max_cos_id;
+ dev_cap->er_id = cap->er_id;
+ dev_cap->port_id = cap->port_id;
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->svc_cap_en = cap->chip_svc_type;
+ dev_cap->host_total_func = cap->host_total_function;
+ dev_cap->host_oq_id_mask_val = cap->host_oq_id_mask_val;
+ dev_cap->net_port_mode = cap->net_port_mode;
+
+ /* The parameters below are left uninitialized because NIC and RoCE
+ * do not use them:
+ * max_connect_num
+ * max_stick2cache_num
+ * bfilter_start_addr
+ * bfilter_len
+ * hash_bucket_num
+ * cfg_file_ver
+ */
+
+ /* NIC VF resources */
+ dev_cap->nic_max_sq = nic_cap->vf_max_sqs;
+ dev_cap->nic_max_rq = nic_cap->vf_max_rqs;
+
+ /* ROCE VF resources */
+ dev_cap->roce_max_qp = roce_cap->vf_max_qps;
+ dev_cap->roce_max_cq = roce_cap->vf_max_cqs;
+ dev_cap->roce_max_srq = roce_cap->vf_max_srqs;
+ dev_cap->roce_max_mpt = roce_cap->vf_max_mpts;
+
+ dev_cap->roce_cmtt_cl_start = roce_cap->cmtt_cl_start;
+ dev_cap->roce_cmtt_cl_end = roce_cap->cmtt_cl_end;
+ dev_cap->roce_cmtt_cl_size = roce_cap->cmtt_cl_sz;
+
+ dev_cap->roce_dmtt_cl_start = roce_cap->dmtt_cl_start;
+ dev_cap->roce_dmtt_cl_end = roce_cap->dmtt_cl_end;
+ dev_cap->roce_dmtt_cl_size = roce_cap->dmtt_cl_sz;
+
+ dev_cap->roce_wqe_cl_start = roce_cap->wqe_cl_start;
+ dev_cap->roce_wqe_cl_end = roce_cap->wqe_cl_end;
+ dev_cap->roce_wqe_cl_size = roce_cap->wqe_cl_sz;
+
+ /* Iwarp VF resources */
+ dev_cap->iwarp_max_qp = iwarp_cap->vf_max_qps;
+ dev_cap->iwarp_max_cq = iwarp_cap->vf_max_cqs;
+ dev_cap->iwarp_max_mpt = iwarp_cap->vf_max_mpts;
+
+ /* OVS VF resources */
+ dev_cap->ovs_max_qpc = ovs_cap->max_pctxs;
+
+ *out_size = sizeof(*dev_cap);
+
+ if (!IS_OVS_TYPE(dev))
+ return 0;
+
+ out_len = sizeof(dev_cap_tmp);
+ /* fixed qnum in ovs mode */
+ func_id = vf_id + hinic_glb_pf_vf_offset(hwdev);
+ dev_cap_tmp.func_id = func_id;
+ err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_FUNC_CAP,
+ &dev_cap_tmp, sizeof(dev_cap_tmp),
+ &dev_cap_tmp, &out_len, 0);
+ if (err || !out_len || dev_cap_tmp.status) {
+ sdk_err(dev->dev_hdl,
+ "Get func_id: %u capability from FW failed, err: %d, status: 0x%x, out_size:
0x%x\n",
+ func_id, err, dev_cap_tmp.status, out_len);
+ return -EFAULT;
+ }
+
+ dev_cap->nic_max_sq = dev_cap_tmp.nic_max_sq + 1;
+ dev_cap->nic_max_rq = dev_cap_tmp.nic_max_rq + 1;
+ sdk_info(dev->dev_hdl, "func_id(%u) fixed qnum %u\n",
+ func_id, dev_cap->nic_max_sq);
+
+ return 0;
+}
+
+static int cfg_mbx_ppf_proc_msg(void *hwdev, u16 pf_id, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ sdk_info(dev->dev_hdl, "ppf receive other pf cfgmgmt cmd %d mbox msg\n",
+ cmd);
+
+ return hinic_ppf_process_mbox_msg(hwdev, pf_id, vf_id, HINIC_MOD_CFGM,
+ cmd, buf_in, in_size, buf_out,
+ out_size);
+}
+
+static int cfg_mbx_vf_proc_msg(void *hwdev, u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ *out_size = 0;
+ sdk_err(dev->dev_hdl, "VF msg callback not supported\n");
+
+ return -EOPNOTSUPP;
+}
+
+static int cfg_mbx_init(struct hinic_hwdev *dev, struct cfg_mgmt_info *cfg_mgmt)
+{
+ int err;
+ enum func_type type = dev->hwif->attr.func_type;
+
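+ /* register the mailbox callback that matches this function's role */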
+ if (type == TYPE_PF) {
+ err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ sdk_err(dev->dev_hdl,
+ "PF: Register PF mailbox callback failed\n");
+ return err;
+ }
+ } else if (type == TYPE_PPF) {
+ err = hinic_register_ppf_mbox_cb(dev, HINIC_MOD_CFGM,
+ cfg_mbx_ppf_proc_msg);
+ if (err) {
+ sdk_err(dev->dev_hdl,
+ "PPF: Register PPF mailbox callback failed\n");
+ return err;
+ }
+
+ err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ sdk_err(dev->dev_hdl,
+ "PPF: Register PF mailbox callback failed\n");
+ hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM);
+ return err;
+ }
+ } else if (type == TYPE_VF) {
+ err = hinic_register_vf_mbox_cb(dev, HINIC_MOD_CFGM,
+ cfg_mbx_vf_proc_msg);
+ if (err) {
+ sdk_err(dev->dev_hdl,
+ "VF: Register VF mailbox callback failed\n");
+ return err;
+ }
+ } else {
+ sdk_err(dev->dev_hdl, "Invalid func_type: %d, not supported\n",
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cfg_mbx_cleanup(struct hinic_hwdev *dev)
+{
+ hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM);
+ hinic_unregister_pf_mbox_cb(dev, HINIC_MOD_CFGM);
+ hinic_unregister_vf_mbox_cb(dev, HINIC_MOD_CFGM);
+}
+
+int init_cfg_mgmt(struct hinic_hwdev *dev)
+{
+ int err;
+ struct cfg_mgmt_info *cfg_mgmt;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ dev->cfg_mgmt = cfg_mgmt;
+ cfg_mgmt->hwdev = dev;
+
+ err = cfg_init_eq(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n",
+ err);
+ goto free_mgmt_mem;
+ }
+
+ err = cfg_init_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n",
+ err);
+ goto free_eq_mem;
+ }
+
+ err = cfg_enable_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n",
+ err);
+ goto free_interrupt_mem;
+ }
+
+ return 0;
+
+free_interrupt_mem:
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+
+free_eq_mem:
+ kfree(cfg_mgmt->eq_info.eq);
+ cfg_mgmt->eq_info.eq = NULL;
+
+free_mgmt_mem:
+ kfree(cfg_mgmt);
+ return err;
+}
+
+void free_cfg_mgmt(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+
+ /* check whether all allocated resources have been recycled */
+ if (cfg_mgmt->irq_param_info.num_irq_remain !=
+ cfg_mgmt->irq_param_info.num_total ||
+ cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq)
+ sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please
check\n");
+
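+ /* tear down whichever interrupt mode cfg_enable_interrupt() set up */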
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+ pci_disable_msix(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_MSI:
+ pci_disable_msi(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_INT:
+ default:
+ break;
+ }
+
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
+ mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex));
+
+ kfree(cfg_mgmt->eq_info.eq);
+ cfg_mgmt->eq_info.eq = NULL;
+ mutex_deinit(&cfg_mgmt->eq_info.eq_mutex);
+
+ kfree(cfg_mgmt);
+}
+
+int init_capability(struct hinic_hwdev *dev)
+{
+ int err;
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+
+ set_cfg_test_param(cfg_mgmt);
+
+ err = cfg_mbx_init(dev, cfg_mgmt);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Configure mailbox init failed, err: %d\n",
+ err);
+ return err;
+ }
+
+ cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false;
+ cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false;
+
+ err = get_dev_cap(dev);
+ if (err) {
+ cfg_mbx_cleanup(dev);
+ return err;
+ }
+
+ init_service_param(dev);
+
+ sdk_info(dev->dev_hdl, "Init capability success\n");
+ return 0;
+}
+
+void free_capability(struct hinic_hwdev *dev)
+{
+ cfg_mbx_cleanup(dev);
+ sdk_info(dev->dev_hdl, "Free capability success");
+}
+
+/* 0 - MSIx, 1 - MSI, 2 - INTx */
+enum intr_type hinic_intr_type(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return INTR_TYPE_NONE;
+
+ return dev->cfg_mgmt->svc_cap.interrupt_type;
+}
+EXPORT_SYMBOL(hinic_intr_type);
+
+bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_NIC_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_nic);
+
+bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_ROCE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_roce);
+
+bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FCOE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.fcoe_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fcoe);
+
+/* Only the PPF supports it, the PF does not */
+bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_TOE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_toe);
+
+bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_IWARP_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_iwarp);
+
+bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FC_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fc);
+
+bool hinic_support_fic(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FIC_TYPE(dev))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fic);
+
+bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_OVS_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_ovs);
+
+bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_ACL_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.acl_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_acl);
+
+bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_RDMA_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_rdma);
+
+bool hinic_support_ft(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FT_TYPE(dev))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_ft);
+
+bool hinic_func_for_mgmt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return !dev->cfg_mgmt->svc_cap.chip_svc_type;
+}
+
+int cfg_set_func_sf_en(void *hwdev, u32 enbits, u32 enmask)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct nic_misc_func_sf_enbits *func_sf_enbits;
+ u16 out_size = sizeof(*func_sf_enbits);
+ u16 glb_func_idx;
+ u16 api_info_len;
+ int err;
+
+ api_info_len = sizeof(struct nic_misc_func_sf_enbits);
+ func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL);
+ if (!func_sf_enbits) {
+ sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n");
+ return -ENOMEM;
+ }
+
+ glb_func_idx = hinic_global_func_id(hwdev);
+ func_sf_enbits->function_id = glb_func_idx;
+ func_sf_enbits->stateful_enbits = enbits;
+ func_sf_enbits->stateful_enmask = enmask;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_MISC_SET_FUNC_SF_ENBITS,
+ (void *)func_sf_enbits, api_info_len,
+ (void *)func_sf_enbits, &out_size,
+ VSW_UP_CFG_TIMEOUT);
+ if (err || !out_size || func_sf_enbits->status) {
+ sdk_err(dev->dev_hdl,
+ "Failed to set stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, func_sf_enbits->status, out_size);
+ kfree(func_sf_enbits);
+ return -EINVAL;
+ }
+
+ kfree(func_sf_enbits);
+ return 0;
+}
+
+int cfg_get_func_sf_en(void *hwdev, u32 *enbits)
+{
+ struct nic_misc_func_sf_enbits *func_sf_enbits;
+ struct hinic_hwdev *dev = hwdev;
+ u16 out_size = sizeof(*func_sf_enbits);
+ u16 glb_func_idx;
+ u16 api_info_len;
+ int err;
+
+ api_info_len = sizeof(struct nic_misc_func_sf_enbits);
+ func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL);
+ if (!func_sf_enbits) {
+ sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n");
+ return -ENOMEM;
+ }
+
+ glb_func_idx = hinic_global_func_id(hwdev);
+
+ func_sf_enbits->function_id = glb_func_idx;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_MISC_GET_FUNC_SF_ENBITS,
+ (void *)func_sf_enbits, api_info_len,
+ (void *)func_sf_enbits, &out_size,
+ VSW_UP_CFG_TIMEOUT);
+ if (err || !out_size || func_sf_enbits->status) {
+ sdk_err(dev->dev_hdl, "Failed to get stateful enable, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, func_sf_enbits->status, out_size);
+ kfree(func_sf_enbits);
+ return -EINVAL;
+ }
+
+ *enbits = func_sf_enbits->stateful_enbits;
+
+ kfree(func_sf_enbits);
+ return 0;
+}
+
+int hinic_set_toe_enable(void *hwdev, bool enable)
+{
+ u32 enbits;
+ u32 enmask;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ enbits = VSW_SET_STATEFUL_BITS_TOE((u16)enable);
+ enmask = VSW_SET_STATEFUL_BITS_TOE(0x1U);
+
+ return cfg_set_func_sf_en(hwdev, enbits, enmask);
+}
+EXPORT_SYMBOL(hinic_set_toe_enable);
+
+bool hinic_get_toe_enable(void *hwdev)
+{
+ int err;
+ u32 enbits;
+
+ if (!hwdev)
+ return false;
+
+ err = cfg_get_func_sf_en(hwdev, &enbits);
+ if (err)
+ return false;
+
+ return VSW_GET_STATEFUL_BITS_TOE(enbits);
+}
+EXPORT_SYMBOL(hinic_get_toe_enable);
+
+int hinic_set_fcoe_enable(void *hwdev, bool enable)
+{
+ u32 enbits;
+ u32 enmask;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ enbits = VSW_SET_STATEFUL_BITS_FCOE((u16)enable);
+ enmask = VSW_SET_STATEFUL_BITS_FCOE(0x1U);
+
+ return cfg_set_func_sf_en(hwdev, enbits, enmask);
+}
+EXPORT_SYMBOL(hinic_set_fcoe_enable);
+
+bool hinic_get_fcoe_enable(void *hwdev)
+{
+ int err;
+ u32 enbits;
+
+ if (!hwdev)
+ return false;
+
+ err = cfg_get_func_sf_en(hwdev, &enbits);
+ if (err)
+ return false;
+
+ return VSW_GET_STATEFUL_BITS_FCOE(enbits);
+}
+EXPORT_SYMBOL(hinic_get_fcoe_enable);
+
+bool hinic_get_stateful_enable(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return dev->cfg_mgmt->svc_cap.sf_en;
+}
+EXPORT_SYMBOL(hinic_get_stateful_enable);
+
+u8 hinic_host_oq_id_mask(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host oq id mask\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val;
+}
+EXPORT_SYMBOL(hinic_host_oq_id_mask);
+
+u8 hinic_host_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_id;
+}
+EXPORT_SYMBOL(hinic_host_id);
+
+u16 hinic_host_total_func(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host total function number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_total_function;
+}
+EXPORT_SYMBOL(hinic_host_total_func);
+
+u16 hinic_func_max_qnum(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function max queue number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.max_sqs;
+}
+EXPORT_SYMBOL(hinic_func_max_qnum);
+
+u16 hinic_func_max_nic_qnum(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function max queue number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs;
+}
+EXPORT_SYMBOL(hinic_func_max_nic_qnum);
+
+u8 hinic_ep_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting ep id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.ep_id;
+}
+EXPORT_SYMBOL(hinic_ep_id);
+
+u8 hinic_er_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting er id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.er_id;
+}
+EXPORT_SYMBOL(hinic_er_id);
+
+u8 hinic_physical_port_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting physical port id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.port_id;
+}
+EXPORT_SYMBOL(hinic_physical_port_id);
+
+u8 hinic_func_max_vf(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting max vf number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.max_vf;
+}
+EXPORT_SYMBOL(hinic_func_max_vf);
+
+u8 hinic_max_num_cos(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting max cos number\n");
+ return 0;
+ }
+ return (u8)(dev->cfg_mgmt->svc_cap.max_cos_id + 1);
+}
+EXPORT_SYMBOL(hinic_max_num_cos);
+
+u8 hinic_cos_valid_bitmap(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n");
+ return 0;
+ }
+ return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap);
+}
+EXPORT_SYMBOL(hinic_cos_valid_bitmap);
+
+u8 hinic_net_port_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting net port mode\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.net_port_mode;
+}
+EXPORT_SYMBOL(hinic_net_port_mode);
+
+bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev || state > HINIC_HWDEV_ALL_INITED)
+ return false;
+
+ return !!test_bit(state, &dev->func_state);
+}
+
+static int hinic_os_dep_init(struct hinic_hwdev *hwdev)
+{
+ hwdev->workq = create_singlethread_workqueue(HINIC_HW_WQ_NAME);
+ if (!hwdev->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n");
+ return -EFAULT;
+ }
+
+ sema_init(&hwdev->recover_sem, 1);
+ sema_init(&hwdev->fault_list_sem, 1);
+
+ INIT_WORK(&hwdev->fault_work, hinic_fault_work_handler);
+
+ return 0;
+}
+
+static void hinic_os_dep_deinit(struct hinic_hwdev *hwdev)
+{
+ destroy_work(&hwdev->fault_work);
+ destroy_workqueue(hwdev->workq);
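+
+ /* cycle the semaphore so any in-flight holder drains before teardown */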
+ down(&hwdev->fault_list_sem);
+
+ up(&hwdev->fault_list_sem);
+
+ sema_deinit(&hwdev->fault_list_sem);
+ sema_deinit(&hwdev->recover_sem);
+}
+
+void hinic_ppf_hwdev_unreg(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ down(&dev->ppf_sem);
+ dev->ppf_hwdev = NULL;
+ up(&dev->ppf_sem);
+
+ sdk_info(dev->dev_hdl, "Unregister PPF hwdev\n");
+}
+
+void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ down(&dev->ppf_sem);
+ dev->ppf_hwdev = ppf_hwdev;
+ up(&dev->ppf_sem);
+
+ sdk_info(dev->dev_hdl, "Register PPF hwdev\n");
+}
+
+static int __vf_func_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_vf_mbox_random_id_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init vf mbox random id\n");
+ return err;
+ }
+ err = hinic_vf_func_init(hwdev);
+ if (err)
+ nic_err(hwdev->dev_hdl, "Failed to init nic mbox\n");
+
+ return err;
+}
+
+static int __hilink_phy_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (!HINIC_IS_VF(hwdev)) {
+ err = hinic_phy_init_status_judge(hwdev);
+ if (err) {
+ sdk_info(hwdev->dev_hdl, "Phy init failed\n");
+ return err;
+ }
+
+ if (hinic_support_nic(hwdev, NULL))
+ hinic_hilink_info_show(hwdev);
+ }
+
+ return 0;
+}
+
+/* Return:
+ * 0: all success
+ * >0: partial success
+ * <0: all failed
+ */
+int hinic_init_hwdev(struct hinic_init_para *para)
+{
+ struct hinic_hwdev *hwdev;
+ int err;
+
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ *para->hwdev = hwdev;
+ hwdev->adapter_hdl = para->adapter_hdl;
+ hwdev->pcidev_hdl = para->pcidev_hdl;
+ hwdev->dev_hdl = para->dev_hdl;
+ hwdev->chip_node = para->chip_node;
+ hwdev->ppf_hwdev = para->ppf_hwdev;
+ sema_init(&hwdev->ppf_sem, 1);
+
+ hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE);
+ if (!hwdev->chip_fault_stats)
+ goto alloc_chip_fault_stats_err;
+
+ err = hinic_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base,
+ para->db_base_phy, para->db_base,
+ para->dwqe_mapping);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
+ goto init_hwif_err;
+ }
+
+ /* detect slave host according to BAR reg */
+ detect_host_mode_pre(hwdev);
+
+ err = hinic_os_dep_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
+ goto os_dep_init_err;
+ }
+
+ hinic_set_chip_present(hwdev);
+ hinic_init_heartbeat(hwdev);
+
+ err = init_cfg_mgmt(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n");
+ goto init_cfg_mgmt_err;
+ }
+
+ err = hinic_init_comm_ch(hwdev);
+ if (err) {
+ if (!(hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK)) {
+ sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n");
+ goto init_comm_ch_err;
+ } else {
+ sdk_err(hwdev->dev_hdl, "Init communication channel partitail
failed\n");
+ return hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK;
+ }
+ }
+
+ err = init_capability(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init capability\n");
+ goto init_cap_err;
+ }
+
+ err = __vf_func_init(hwdev);
+ if (err)
+ goto vf_func_init_err;
+
+ err = __hilink_phy_init(hwdev);
+ if (err)
+ goto hilink_phy_init_err;
+
+ set_bit(HINIC_HWDEV_ALL_INITED, &hwdev->func_state);
+
+ sdk_info(hwdev->dev_hdl, "Init hwdev success\n");
+
+ return 0;
+
+hilink_phy_init_err:
+
+ hinic_vf_func_free(hwdev);
+vf_func_init_err:
+ free_capability(hwdev);
+init_cap_err:
+ return (hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK);
+
+init_comm_ch_err:
+ free_cfg_mgmt(hwdev);
+
+init_cfg_mgmt_err:
+ hinic_destroy_heartbeat(hwdev);
+ hinic_os_dep_deinit(hwdev);
+
+os_dep_init_err:
+ hinic_free_hwif(hwdev);
+
+init_hwif_err:
+ vfree(hwdev->chip_fault_stats);
+
+alloc_chip_fault_stats_err:
+ kfree(hwdev);
+
+ *para->hwdev = NULL;
+
+ return -EFAULT;
+}
+
+void hinic_free_hwdev(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ if (test_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state)) {
+ clear_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state);
+
+ /* BM slave function does not need to exec rx_tx_flush */
+ if (dev->func_mode != FUNC_MOD_MULTI_BM_SLAVE)
+ hinic_func_rx_tx_flush(hwdev);
+
+ hinic_vf_func_free(hwdev);
+
+ free_capability(dev);
+ }
+
+ hinic_uninit_comm_ch(dev);
+ free_cfg_mgmt(dev);
+ hinic_destroy_heartbeat(dev);
+ hinic_os_dep_deinit(dev);
+ hinic_free_hwif(dev);
+ vfree(dev->chip_fault_stats);
+ sema_deinit(&dev->ppf_sem);
+ kfree(dev);
+}
+
+void hinic_shutdown_hwdev(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ if (IS_SLAVE_HOST(dev))
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+}
+
+u32 hinic_func_pf_num(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ return 0;
+ }
+
+ return dev->cfg_mgmt->svc_cap.pf_num;
+}
+
+u64 hinic_get_func_feature_cap(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ return 0;
+ }
+
+ return dev->feature_cap;
+}
+
+enum hinic_func_mode hinic_get_func_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ return 0;
+ }
+
+ return dev->func_mode;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h
new file mode 100644
index 000000000000..b31c624ef61d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CFG_MGT_H__
+#define __CFG_MGT_H__
+
+#include "hinic_ctx_def.h"
+
+#define CFG_MAX_CMD_TIMEOUT 1000 /* ms */
+
+enum {
+ CFG_FREE = 0,
+ CFG_BUSY = 1
+};
+
+/* start position for CEQ allocation; max number of CEQs is 32 */
+/*lint -save -e849*/
+enum {
+ CFG_RDMA_CEQ_BASE = 0
+};
+
+/*lint -restore*/
+enum {
+ CFG_NET_MODE_ETH = 0, /* Eth */
+ CFG_NET_MODE_FIC = 1, /* FIC */
+ CFG_NET_MODE_FC = 2 /* FC */
+};
+
+enum {
+ SF_SVC_FT_BIT = (1 << 0),
+ SF_SVC_RDMA_BIT = (1 << 1),
+};
+
+/* RDMA resource */
+#define K_UNIT BIT(10)
+#define M_UNIT BIT(20)
+#define G_UNIT BIT(30)
+
+/* number of PFs and VFs */
+#define HOST_PF_NUM 4
+#define HOST_VF_NUM 0
+#define HOST_OQID_MASK_VAL 2
+
+
+/* L2NIC */
+#define L2NIC_SQ_DEPTH (4 * K_UNIT)
+#define L2NIC_RQ_DEPTH (4 * K_UNIT)
+
+#define HINIC_CFG_MAX_QP 64
+
+/* RDMA */
+#define RDMA_RSVD_QPS 2
+#define ROCE_MAX_WQES (16 * K_UNIT - 1)
+#define IWARP_MAX_WQES (8 * K_UNIT)
+
+#define RDMA_MAX_SQ_SGE 8
+
+#define ROCE_MAX_RQ_SGE 8
+#define IWARP_MAX_RQ_SGE 2
+
+#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT)
+
+/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */
+#define ROCE_MAX_SQ_INLINE_DATA_SZ 192
+
+#define IWARP_MAX_SQ_INLINE_DATA_SZ 108
+
+#define ROCE_MAX_RQ_DESC_SZ 128
+#define IWARP_MAX_RQ_DESC_SZ 64
+
+#define IWARP_MAX_IRQ_DEPTH 1024
+#define IWARP_IRQ_ENTRY_SZ 64
+
+#define IWARP_MAX_ORQ_DEPTH 1024
+#define IWARP_ORQ_ENTRY_SZ 32
+
+#define IWARP_MAX_RTOQ_DEPTH 1024
+#define IWARP_RTOQ_ENTRY_SZ 32
+
+#define IWARP_MAX_ACKQ_DEPTH 1024
+#define IWARP_ACKQ_ENTRY_SZ 16
+
+#define ROCE_QPC_ENTRY_SZ 512
+#define IWARP_QPC_ENTRY_SZ 1024
+
+#define WQEBB_SZ 64
+
+#define ROCE_RDMARC_ENTRY_SZ 32
+#define ROCE_MAX_QP_INIT_RDMA 128
+#define ROCE_MAX_QP_DEST_RDMA 128
+
+#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1)
+#define ROCE_RSVD_SRQS 0
+#define ROCE_MAX_SRQ_SGE 7
+#define ROCE_SRQC_ENTERY_SZ 64
+
+#define RDMA_MAX_CQES (64 * K_UNIT - 1)
+#define RDMA_RSVD_CQS 0
+
+#define RDMA_CQC_ENTRY_SZ 128
+
+#define RDMA_CQE_SZ 32
+#define RDMA_RSVD_MRWS 128
+#define RDMA_MPT_ENTRY_SZ 64
+#define RDMA_NUM_MTTS (1 * G_UNIT)
+#define LOG_MTT_SEG 5
+#define MTT_ENTRY_SZ 8
+#define LOG_RDMARC_SEG 3
+
+#define LOCAL_ACK_DELAY 15
+#define RDMA_NUM_PORTS 1
+#define ROCE_MAX_MSG_SZ (2 * G_UNIT)
+#define IWARP_MAX_MSG_SZ (1 * G_UNIT)
+
+#define DB_PAGE_SZ (4 * K_UNIT)
+#define DWQE_SZ 256
+
+#define NUM_PD (128 * K_UNIT)
+#define RSVD_PD 0
+
+#define MAX_XRCDS (64 * K_UNIT)
+#define RSVD_XRCDS 0
+
+#define MAX_GID_PER_PORT 16
+#define GID_ENTRY_SZ 32
+#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8)
+#define NUM_COMP_VECTORS 32
+#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 13) | (1UL << 14) | \
+ (1UL << 16) | (1UL << 18) | (1UL << 20) | \
+ (1UL << 22))
+#define ROCE_MODE 1
+
+#define MAX_FRPL_LEN 511
+#define MAX_PKEYS 1
+
+/* FCoE */
+#define FCOE_PCTX_SZ 256
+#define FCOE_CCTX_SZ 256
+#define FCOE_SQE_SZ 128
+#define FCOE_SCQC_SZ 64
+#define FCOE_SCQE_SZ 64
+#define FCOE_SRQC_SZ 64
+#define FCOE_SRQE_SZ 32
+
+/* ToE */
+#define TOE_PCTX_SZ 1024
+#define TOE_CQC_SZ 64
+
+/* IoE */
+#define IOE_PCTX_SZ 512
+
+/* FC */
+#define FC_PCTX_SZ 256
+#define FC_CCTX_SZ 256
+#define FC_SQE_SZ 128
+#define FC_SCQC_SZ 64
+#define FC_SCQE_SZ 64
+#define FC_SRQC_SZ 64
+#define FC_SRQE_SZ 32
+
+/* OVS */
+#define OVS_PCTX_SZ 256
+#define OVS_SCQC_SZ 64
+
+/* ACL */
+#define ACL_PCTX_SZ 512
+#define ACL_SCQC_SZ 64
+
+struct dev_sf_svc_attr {
+ bool ft_en; /* business enable flag (does not include RDMA) */
+ bool ft_pf_en; /* In FPGA test, whether the VF's resource resides
+ * in the PF: 0 - VF, 1 - PF. VFs don't need this bit.
+ */
+ bool rdma_en;
+ bool rdma_pf_en;/* In FPGA test, whether the VF's RDMA resource
+ * resides in the PF: 0 - VF, 1 - PF. VFs don't
+ * need this bit.
+ */
+ u8 sf_en_vf; /* SF_EN for PPF/PF's VF */
+};
+
+struct host_shared_resource_cap {
+ u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */
+ u32 host_cctxs; /* Child Context: max 8K */
+ u32 host_scqs; /* shared CQ: the chip interface module uses 1 SCQ,
+ * TOE/IOE/FCoE each use 1 SCQ,
+ * RoCE/IWARP use multiple SCQs,
+ * so at least 6 SCQs are needed
+ */
+ u32 host_srqs; /* SRQ number: 256K */
+ u32 host_mpts; /* MR number:1M */
+};
+
+/* device capability */
+struct service_cap {
+ struct dev_sf_svc_attr sf_svc_attr;
+ enum cfg_svc_type_en svc_type; /* user input service type */
+ enum cfg_svc_type_en chip_svc_type; /* HW supported service type */
+
+ /* Host global resources */
+ u16 host_total_function;
+ u8 host_oq_id_mask_val;
+ u8 host_id;
+ u8 ep_id;
+ /* DO NOT get interrupt_type from firmware */
+ enum intr_type interrupt_type;
+ u8 intr_chip_en;
+ u8 max_cos_id; /* PF/VF's max cos id */
+ u8 cos_valid_bitmap;
+ u8 er_id; /* PF/VF's ER */
+ u8 port_id; /* PF/VF's physical port */
+ u8 max_vf; /* max VF number that PF supported */
+ bool sf_en; /* stateful service status */
+ u8 timer_en; /* 0:disable, 1:enable */
+ u8 bloomfilter_en; /* 0:disable, 1:enable */
+ u16 max_sqs;
+ u16 max_rqs;
+
+ /* For test */
+ u32 test_qpc_num;
+ u32 test_qpc_resvd_num;
+ u32 test_page_size_reorder;
+ bool test_xid_alloc_mode;
+ bool test_gpa_check_enable;
+ u8 test_qpc_alloc_mode;
+ u8 test_scqc_alloc_mode;
+
+ u32 test_max_conn_num;
+ u32 test_max_cache_conn_num;
+ u32 test_scqc_num;
+ u32 test_mpt_num;
+ u32 test_scq_resvd_num;
+ u32 test_mpt_recvd_num;
+ u32 test_hash_num;
+ u32 test_reorder_num;
+
+ u32 max_connect_num; /* PF/VF maximum connection number(1M) */
+ /* The maximum connections which can be stick to cache memory, max 1K */
+ u16 max_stick2cache_num;
+ /* Starting address in cache memory for bloom filter, 64Bytes aligned */
+ u16 bfilter_start_addr;
+ /* Length for bloom filter, aligned on 64Bytes. The size is length*64B.
+ * Bloom filter memory size + 1 must be power of 2.
+ * The maximum memory size of bloom filter is 4M
+ */
+ u16 bfilter_len;
+ /* The size of hash bucket tables, aligned on 64 entries.
+ * Used to AND (&) the hash value; bucket size + 1 must be a power of 2.
+ * The maximum number of hash buckets is 4M
+ */
+ u16 hash_bucket_num;
+ u8 net_port_mode; /* 0:ETH, 1:FIC, 2:FC */
+
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num; /* max numbers of vf in current host */
+ u32 vf_id_start;
+
+ struct host_shared_resource_cap shared_res_cap; /* shared capability */
+ struct dev_version_info dev_ver_info; /* version */
+ struct nic_service_cap nic_cap; /* NIC capability */
+ struct rdma_service_cap rdma_cap; /* RDMA capability */
+ struct fcoe_service_cap fcoe_cap; /* FCoE capability */
+ struct toe_service_cap toe_cap; /* ToE capability */
+ struct fc_service_cap fc_cap; /* FC capability */
+ struct ovs_service_cap ovs_cap; /* OVS capability */
+ struct acl_service_cap acl_cap; /* ACL capability */
+};
+
+struct cfg_eq {
+ enum hinic_service_type type;
+ int eqn;
+ int free; /* 1 - allocated, 0 - freed */
+};
+
+struct cfg_eq_info {
+ struct cfg_eq *eq;
+
+ u8 num_ceq;
+ //u8 num_aeq;
+ //u8 num_eq; /* num_eq = num_ceq + num_aeq */
+
+ u8 num_ceq_remain;
+
+ /* mutex used for allocating EQs */
+ struct mutex eq_mutex;
+};
+
+struct irq_alloc_info_st {
+ enum hinic_service_type type;
+ int free; /* 1 - allocated, 0 - freed */
+ struct irq_info info;
+};
+
+struct cfg_irq_info {
+ struct irq_alloc_info_st *alloc_info;
+ u16 num_total;
+ u16 num_irq_remain;
+ u16 num_irq_hw; /* device max irq number */
+
+ /* mutex used for allocating IRQs */
+ struct mutex irq_mutex;
+};
+
+#define VECTOR_THRESHOLD 2
+
+struct cfg_mgmt_info {
+ struct hinic_hwdev *hwdev;
+ struct service_cap svc_cap;
+ struct cfg_eq_info eq_info; /* EQ */
+ struct cfg_irq_info irq_param_info; /* IRQ */
+ u32 func_seq_num; /* temporary */
+};
+
+enum cfg_sub_cmd {
+ /* PPF(PF) <-> FW */
+ HINIC_CFG_NIC_CAP = 0,
+ CFG_FW_VERSION,
+ CFG_UCODE_VERSION,
+ HINIC_CFG_FUNC_CAP,
+ HINIC_CFG_MBOX_CAP = 6,
+};
+
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ /* Public resource */
+ u8 sf_svc_attr;
+ u8 host_id;
+ u8 sf_en_pf;
+ u8 sf_en_vf;
+
+ u8 ep_id;
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u16 svc_cap_en;
+ u16 host_total_func;
+ u8 host_oq_id_mask_val;
+ u8 max_vf_cos_id;
+
+ u32 max_conn_num;
+ u16 max_stick2cache_num;
+ u16 max_bfilter_start_addr;
+ u16 bfilter_len;
+ u16 hash_bucket_num;
+ u8 cfg_file_ver;
+ u8 net_port_mode;
+ u8 valid_cos_bitmap; /* every bit indicate cos is valid */
+ u8 rsvd1;
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num;
+ u32 vf_id_start;
+
+ /* shared resource */
+ u32 host_pctx_num;
+ u8 host_sf_en;
+ u8 rsvd2[3];
+ u32 host_ccxt_num;
+ u32 host_scq_num;
+ u32 host_srq_num;
+ u32 host_mpt_num;
+
+ /* l2nic */
+ u16 nic_max_sq;
+ u16 nic_max_rq;
+ u16 nic_vf_max_sq;
+ u16 nic_vf_max_rq;
+ u8 nic_lro_en;
+ u8 nic_lro_sz;
+ u8 nic_tso_sz;
+ u8 rsvd3;
+
+ /* RoCE */
+ u32 roce_max_qp;
+ u32 roce_max_cq;
+ u32 roce_max_srq;
+ u32 roce_max_mpt;
+
+ u32 roce_vf_max_qp;
+ u32 roce_vf_max_cq;
+ u32 roce_vf_max_srq;
+ u32 roce_vf_max_mpt;
+
+ u32 roce_cmtt_cl_start;
+ u32 roce_cmtt_cl_end;
+ u32 roce_cmtt_cl_size;
+
+ u32 roce_dmtt_cl_start;
+ u32 roce_dmtt_cl_end;
+ u32 roce_dmtt_cl_size;
+
+ u32 roce_wqe_cl_start;
+ u32 roce_wqe_cl_end;
+ u32 roce_wqe_cl_size;
+
+ /* IWARP */
+ u32 iwarp_max_qp;
+ u32 iwarp_max_cq;
+ u32 iwarp_max_mpt;
+
+ u32 iwarp_vf_max_qp;
+ u32 iwarp_vf_max_cq;
+ u32 iwarp_vf_max_mpt;
+
+ u32 iwarp_cmtt_cl_start;
+ u32 iwarp_cmtt_cl_end;
+ u32 iwarp_cmtt_cl_size;
+
+ u32 iwarp_dmtt_cl_start;
+ u32 iwarp_dmtt_cl_end;
+ u32 iwarp_dmtt_cl_size;
+
+ u32 iwarp_wqe_cl_start;
+ u32 iwarp_wqe_cl_end;
+ u32 iwarp_wqe_cl_size;
+
+ /* FCoE */
+ u32 fcoe_max_qp;
+ u32 fcoe_max_cq;
+ u32 fcoe_max_srq;
+
+ u32 fcoe_max_cctx;
+ u32 fcoe_cctx_id_start;
+
+ u8 fcoe_vp_id_start;
+ u8 fcoe_vp_id_end;
+ u8 rsvd4[2];
+
+ /* OVS */
+ u32 ovs_max_qpc;
+ u32 rsvd6;
+
+ /* ToE */
+ u32 toe_max_pctx;
+ u32 toe_max_cq;
+ u32 toe_max_srq;
+ u32 toe_srq_id_start;
+
+ /* FC */
+ u32 fc_max_pctx;
+ u32 fc_max_scq;
+ u32 fc_max_srq;
+
+ u32 fc_max_cctx;
+ u32 fc_cctx_id_start;
+
+ u8 fc_vp_id_start;
+ u8 fc_vp_id_end;
+ u16 func_id;
+};
+
+#define VSW_UP_CFG_TIMEOUT (0xFF00000)
+
+#define VSW_SET_STATEFUL_BITS_TOE(flag) \
+ ((flag) << VSW_STATEFUL_TOE_EN)
+#define VSW_SET_STATEFUL_BITS_FCOE(flag) \
+ ((flag) << VSW_STATEFUL_FCOE_EN)
+#define VSW_SET_STATEFUL_BITS_IWARP(flag) \
+ ((flag) << VSW_STATEFUL_IWARP_EN)
+#define VSW_SET_STATEFUL_BITS_ROCE(flag) \
+ ((flag) << VSW_STATEFUL_ROCE_EN)
+
+#define VSW_GET_STATEFUL_BITS_TOE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_TOE_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_FCOE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_FCOE_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_IWARP(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_IWARP_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_ROCE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_ROCE_EN) & 0x1U))
+
+enum tag_vsw_major_cmd {
+ VSW_MAJOR_MISC = 10, /* 0~9 reserved for driver */
+ VSW_MAJOR_L2SWITCH,
+ VSW_MAJOR_L2MULTICAST,
+ VSW_MAJOR_QOS,
+ VSW_MAJOR_PKTSUPS,
+ VSW_MAJOR_VLANFILTER,
+ VSW_MAJOR_MACFILTER,
+ VSW_MAJOR_IPFILTER,
+ VSW_MAJOR_VLANMAPPING,
+ VSW_MAJOR_ETHTRUNK,
+ VSW_MAJOR_MIRROR,
+ VSW_MAJOR_DFX,
+ VSW_MAJOR_ACL,
+};
+
+enum tag_vsw_minor_misc_cmd {
+ VSW_MINOR_MISC_INIT_FUNC = 0,
+ VSW_MINOR_MISC_SET_FUNC_SF_ENBITS,
+ VSW_MINOR_MISC_GET_FUNC_SF_ENBITS,
+ VSW_MINOR_MISC_CMD_MAX,
+};
+
+/* vswitch stateful enable bits */
+enum tag_nic_stateful_enbits {
+ VSW_STATEFUL_TOE_EN = 0,
+ VSW_STATEFUL_FCOE_EN = 1,
+ VSW_STATEFUL_IWARP_EN = 2,
+ VSW_STATEFUL_ROCE_EN = 3,
+};
+
+/* function stateful enable parameters */
+struct nic_misc_func_sf_enbits {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u32 function_id;
+ u32 stateful_enbits; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */
+ u32 stateful_enmask; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
new file mode 100644
index 000000000000..e91c310e1894
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
@@ -0,0 +1,1503 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_nic_io.h"
+#include "hinic_eqs.h"
+#include "hinic_wq.h"
+#include "hinic_cmdq.h"
+
+#define CMDQ_CMD_TIMEOUT 1000 /* millisecond */
+
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
+#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
+#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
+#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
+#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
+#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
+
+#define CMDQ_DB_INFO_SET(val, member) \
+ (((val) & CMDQ_DB_INFO_##member##_MASK) \
+ << CMDQ_DB_INFO_##member##_SHIFT)
+
+#define CMDQ_CTRL_PI_SHIFT 0
+#define CMDQ_CTRL_CMD_SHIFT 16
+#define CMDQ_CTRL_MOD_SHIFT 24
+#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_CTRL_PI_MASK 0xFFFFU
+#define CMDQ_CTRL_CMD_MASK 0xFFU
+#define CMDQ_CTRL_MOD_MASK 0x1FU
+#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_CTRL_SET(val, member) \
+ (((val) & CMDQ_CTRL_##member##_MASK) \
+ << CMDQ_CTRL_##member##_SHIFT)
+
+#define CMDQ_CTRL_GET(val, member) \
+ (((val) >> CMDQ_CTRL_##member##_SHIFT) \
+ & CMDQ_CTRL_##member##_MASK)
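
The *_SET/*_GET pairs above follow the driver's usual shift-and-mask pattern: the member name is token-pasted onto its _SHIFT and _MASK macros, so fields can be packed into and pulled out of a 32-bit control word. An illustrative round trip (the field values are arbitrary):

u32 ctrl_info = CMDQ_CTRL_SET(0x12, PI) |
		CMDQ_CTRL_SET(0x3, CMD) |
		CMDQ_CTRL_SET(0x1, MOD) |
		CMDQ_CTRL_SET(0x0, ACK_TYPE);
u8 cmd = (u8)CMDQ_CTRL_GET(ctrl_info, CMD);	/* reads back 0x3 */
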
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_WQE_HEADER_SET(val, member) \
+ (((val) & CMDQ_WQE_HEADER_##member##_MASK) \
+ << CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \
+ & CMDQ_WQE_HEADER_##member##_MASK)
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
+#define CMDQ_CTXT_EQ_ID_SHIFT 56
+#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
+#define CMDQ_CTXT_CEQ_EN_SHIFT 62
+#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_EQ_ID_MASK 0x1F
+#define CMDQ_CTXT_CEQ_ARM_MASK 0x1
+#define CMDQ_CTXT_CEQ_EN_MASK 0x1
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
+
+#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
+#define CMDQ_CTXT_CI_SHIFT 52
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_CI_MASK 0xFFF
+
+#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+#define SAVED_DATA_ARM_SHIFT 31
+
+#define SAVED_DATA_ARM_MASK 0x1U
+
+#define SAVED_DATA_SET(val, member) \
+ (((val) & SAVED_DATA_##member##_MASK) \
+ << SAVED_DATA_##member##_SHIFT)
+
+#define SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(SAVED_DATA_##member##_MASK \
+ << SAVED_DATA_##member##_SHIFT)))
+
+#define WQE_ERRCODE_VAL_SHIFT 20
+
+#define WQE_ERRCODE_VAL_MASK 0xF
+
+#define WQE_ERRCODE_GET(val, member) \
+ (((val) >> WQE_ERRCODE_##member##_SHIFT) & \
+ WQE_ERRCODE_##member##_MASK)
+
+#define CEQE_CMDQ_TYPE_SHIFT 0
+
+#define CEQE_CMDQ_TYPE_MASK 0x7
+
+#define CEQE_CMDQ_GET(val, member) \
+ (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK)
+
+#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
+
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) \
+ (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_PFN_SHIFT 12
+#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT)
+
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+
+#define COMPLETE_LEN 3
+
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQE_SIZE 64
+
+#define CMDQ_WQ_PAGE_SIZE 4096
+
+#define WQE_NUM_WQEBBS(wqe_size, wq) \
+ ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
+
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct hinic_cmdqs, cmdq[0])
+
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_COMPLETE_CMPT_CODE 11
+
+enum cmdq_scmd_type {
+ CMDQ_SET_ARM_CMD = 2,
+};
+
+enum cmdq_wqe_type {
+ WQE_LCMD_TYPE,
+ WQE_SCMD_TYPE,
+};
+
+enum ctrl_sect_len {
+ CTRL_SECT_LEN = 1,
+ CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum bufdesc_len {
+ BUFDESC_LCMD_LEN = 2,
+ BUFDESC_SCMD_LEN = 3,
+};
+
+enum data_format {
+ DATA_SGE,
+ DATA_DIRECT,
+};
+
+enum completion_format {
+ COMPLETE_DIRECT,
+ COMPLETE_SGE,
+};
+
+enum completion_request {
+ CEQ_SET = 1,
+};
+
+enum cmdq_cmd_type {
+ SYNC_CMD_DIRECT_RESP,
+ SYNC_CMD_SGE_RESP,
+ ASYNC_CMD,
+};
+
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
+{
+ struct hinic_wq *wq = cmdq->wq;
+
+ return atomic_read(&wq->delta) == wq->q_depth;
+}
+
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmd_buf *cmd_buf;
+ void *dev;
+
+ if (!hwdev) {
+ pr_err("Failed to alloc cmd buf, Invalid hwdev\n");
+ return NULL;
+ }
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+ dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+
+ cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto alloc_pci_buf_err;
+ }
+
+ return cmd_buf;
+
+alloc_pci_buf_err:
+ kfree(cmd_buf);
+ return NULL;
+}
+EXPORT_SYMBOL(hinic_alloc_cmd_buf);
+
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
+{
+ struct hinic_cmdqs *cmdqs;
+
+ if (!hwdev || !cmd_buf) {
+ pr_err("Failed to free cmd buf: hwdev: %p, cmd_buf: %p\n",
+ hwdev, cmd_buf);
+ return;
+ }
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+EXPORT_SYMBOL(hinic_free_cmd_buf);
+
+static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
+{
+ int wqe_size = 0;
+
+ switch (wqe_type) {
+ case WQE_LCMD_TYPE:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case WQE_SCMD_TYPE:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static int cmdq_get_wqe_size(enum bufdesc_len len)
+{
+ int wqe_size = 0;
+
+ switch (len) {
+ case BUFDESC_LCMD_LEN:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case BUFDESC_SCMD_LEN:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
+ struct hinic_cmd_buf *buf_out)
+{
+ struct hinic_sge_resp *sge_resp = &complete->sge_resp;
+
+ hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
+ HINIC_CMDQ_BUF_SIZE);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
+ struct hinic_cmd_buf *buf_in)
+{
+ hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+static void cmdq_set_inline_wqe_data(struct hinic_cmdq_inline_wqe *wqe,
+ void *buf_in, u32 in_size)
+{
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
+
+ wqe_scmd->buf_desc.buf_len = in_size;
+ memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
+}
+
+static void cmdq_fill_db(struct hinic_cmdq_db *db,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
+ CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+}
+
+static void cmdq_set_db(struct hinic_cmdq *cmdq,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ struct hinic_cmdq_db db;
+
+ cmdq_fill_db(&db, cmdq_type, prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ db.db_info = cpu_to_be32(db.db_info);
+
+ wmb(); /* write all before the doorbell */
+ writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
+static void cmdq_wqe_fill(void *dst, void *src)
+{
+ memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
+ (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ wmb(); /* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
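
cmdq_wqe_fill() implements a publish-last pattern: the WQE body is copied first, a write barrier is issued, and only then are the first 8 bytes (the header, which carries the hardware busy bit) written, so hardware polling that bit never observes a half-written WQE. The same idea in a generic, hypothetical form (struct and function names are illustrative only):

struct record {
	u64 header;	/* carries the ownership/busy bit */
	u8 body[56];
};

static void publish_record(struct record *dst, const struct record *src)
{
	memcpy(dst->body, src->body, sizeof(dst->body));
	wmb();		/* body must be visible before the header */
	WRITE_ONCE(dst->header, src->header);
}
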
+
+static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format data_format,
+ enum bufdesc_len buf_len)
+{
+ struct hinic_ctrl *ctrl;
+ enum ctrl_sect_len ctrl_len;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ u32 saved_data = WQE_HEADER(wqe)->saved_data;
+
+ if (data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD) |
+ CMDQ_CTRL_SET(ack_type, ACK_TYPE);
+
+ WQE_HEADER(wqe)->header_info =
+ CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
+
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data |
+ SAVED_DATA_SET(1, ARM);
+ } else {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data;
+ }
+}
+
+static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ case ASYNC_CMD:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+
+ wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
+ prod_idx, complete_format, DATA_SGE,
+ BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static void cmdq_set_inline_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ void *buf_in, u16 in_size,
+ struct hinic_cmd_buf *buf_out, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_scmd->completion, buf_out);
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_scmd->completion.direct_resp = 0;
+ break;
+ /* TODO: ASYNC CMD */
+ default:
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
+ complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
+
+ cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size);
+}
+
+static void cmdq_update_cmd_status(struct hinic_cmdq *cmdq, u16 prod_idx,
+ struct hinic_cmdq_wqe *wqe)
+{
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ u32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+
+ if (cmd_info->errcode) {
+ status_info = be32_to_cpu(wqe_lcmd->status.status_info);
+ *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp &&
+ cmd_info->cmd_type == HINIC_CMD_TYPE_DIRECT_RESP)
+ *cmd_info->direct_resp =
+ cpu_to_be64(wqe_lcmd->completion.direct_resp);
+}
+
+static int hinic_cmdq_sync_timeout_check(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 pi,
+ enum hinic_mod_type mod, u8 cmd)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_ctrl *ctrl;
+ u32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+ if (!WQE_COMPLETED(ctrl_info)) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EFAULT;
+ }
+
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeeded, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return 0;
+}
+
+static void __clear_cmd_info(struct hinic_cmdq_cmd_info *cmd_info, int *errcode,
+ struct completion *done, u64 *out_param)
+{
+ if (cmd_info->errcode == errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == out_param)
+ cmd_info->direct_resp = NULL;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE == WQEBB_SIZE, so we get the real wq element, not the shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HINIC_CMD_TYPE_DIRECT_RESP;
+
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo)) {
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmd_info->cmpt_code == &cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ goto timeout_check_ok;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx,
+ mod, cmd);
+ if (err)
+ cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type =
+ HINIC_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ sdk_err(cmdq->hwdev->dev_hdl,
+ "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd:
0x%x\n",
+ mod, cmd);
+ }
+
+ __clear_cmd_info(cmd_info, &errcode, &done, out_param);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+ curr_prod_idx);
+ hinic_dump_ceq_info(cmdq->hwdev);
+ destroy_completion(&done);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+ destroy_completion(&done);
+ smp_rmb(); /* read error code after completion */
+
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+static int cmdq_sync_cmd_detail_resp(struct hinic_cmdq *cmdq,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE == WQEBB_SIZE, so we get the real wq element, not the shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HINIC_CMD_TYPE_SGE_RESP;
+
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo)) {
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmd_info->cmpt_code == &cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq detail sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ goto timeout_check_ok;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx,
+ mod, cmd);
+ if (err)
+ cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type =
+ HINIC_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ sdk_err(cmdq->hwdev->dev_hdl,
+ "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd:
0x%x\n",
+ mod, cmd);
+ }
+
+ if (cmd_info->errcode == &errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == &done)
+ cmd_info->done = NULL;
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n",
+ curr_prod_idx);
+ hinic_dump_ceq_info(cmdq->hwdev);
+ destroy_completion(&done);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+ destroy_completion(&done);
+ smp_rmb(); /* read error code after completion */
+
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+static int cmdq_async_cmd(struct hinic_cmdq *cmdq, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ int wrapped;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE == WQEBB_SIZE, so we get the real wq element, not the shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= cmdq->wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= cmdq->wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped,
+ ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_ASYNC;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_ASYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, u16 in_size)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE);
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE == WQEBB_SIZE, so we get the real wq element, not the shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL,
+ wrapped, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_COMM,
+ CMDQ_SET_ARM_CMD, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ /* cmdq wqe is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_SET_ARM;
+
+ cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+static int cmdq_params_valid(void *hwdev, struct hinic_cmd_buf *buf_in)
+{
+ if (!buf_in || !hwdev) {
+ pr_err("Invalid CMDQ buffer addr: %p or hwdev: %p\n",
+ buf_in, hwdev);
+ return -EINVAL;
+ }
+
+ if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
+ pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define WAIT_CMDQ_ENABLE_TIMEOUT 300
+
+static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC_CMDQ_ENABLE)
+ return 0;
+ } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag &&
+ !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in, u64 *out_param,
+ u32 timeout)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err) {
+ pr_err("Invalid CMDQ parameters\n");
+ return err;
+ }
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, out_param, timeout);
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_direct_resp);
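
A hedged sketch of how a caller might use this entry point: allocate a command buffer from the pool, fill in the request, issue the synchronous command, then free the buffer. The command opcode below is a placeholder; a real caller passes a module-specific opcode.

static int example_send_cmd(void *hwdev, const void *req, u16 req_size)
{
	struct hinic_cmd_buf *buf;
	u64 out_param = 0;
	int err;

	buf = hinic_alloc_cmd_buf(hwdev);
	if (!buf)
		return -ENOMEM;

	memcpy(buf->buf, req, req_size);
	buf->size = req_size;

	/* a timeout of 0 falls back to CMDQ_CMD_TIMEOUT (1000 ms) */
	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
				     HINIC_MOD_COMM, 0 /* placeholder cmd */,
				     buf, &out_param, 0);

	hinic_free_cmd_buf(hwdev, buf);
	return err;
}
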
+
+int hinic_cmdq_detail_resp(void *hwdev,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, buf_out, timeout);
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_detail_resp);
+
+int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n");
+ return err;
+ }
+
+ return cmdq_async_cmd(&cmdqs->cmdq[HINIC_CMDQ_ASYNC], ack_type, mod,
+ cmd, buf_in);
+}
+EXPORT_SYMBOL(hinic_cmdq_async);
+
+int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmdq *cmdq;
+ struct hinic_cmdq_arm_bit arm_bit;
+ enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
+ u16 in_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+ return -EBUSY;
+
+ if (q_type == HINIC_SET_ARM_CMDQ) {
+ if (q_id >= HINIC_MAX_CMDQ_TYPES)
+ return -EFAULT;
+
+ cmdq_type = q_id;
+ }
+ /* The SQ uses interrupts now, so the arm bit only needs to be set
+ * for the cmdq; restore the code below if the SQ arm bit is needed:
+ * else
+ * cmdq_type = HINIC_CMDQ_SYNC;
+ */
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+
+ arm_bit.q_type = q_type;
+ arm_bit.q_id = q_id;
+ in_size = sizeof(arm_bit);
+
+ err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl,
+ "Failed to set arm for q_type: %d, qid %d\n",
+ q_type, q_id);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_arm_bit);
+
+static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_inline_wqe *inline_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ struct hinic_ctrl *ctrl;
+ u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
+ int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ int wqe_size = cmdq_get_wqe_size(buf_len);
+ u16 num_wqebbs;
+
+ if (wqe_size == WQE_LCMD_SIZE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ } else {
+ inline_wqe = &wqe->inline_wqe;
+ wqe_scmd = &inline_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ }
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC_CMD_TYPE_NONE;
+
+ wmb(); /* make sure the WQE is cleared before releasing the wqebbs */
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+ hinic_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 cons_idx)
+{
+ u16 prod_idx = cons_idx;
+
+ spin_lock(&cmdq->cmdq_lock);
+
+ cmdq_update_cmd_status(cmdq, prod_idx, wqe);
+
+ if (cmdq->cmd_infos[prod_idx].cmpt_code) {
+ *cmdq->cmd_infos[prod_idx].cmpt_code = CMDQ_COMPLETE_CMPT_CODE;
+ cmdq->cmd_infos[prod_idx].cmpt_code = NULL;
+ }
+
+ /* make sure cmpt_code operation before done operation */
+ smp_rmb();
+
+ if (cmdq->cmd_infos[prod_idx].done) {
+ complete(cmdq->cmd_infos[prod_idx].done);
+ cmdq->cmd_infos[prod_idx].done = NULL;
+ }
+
+ spin_unlock(&cmdq->cmdq_lock);
+
+ clear_wqe_complete_bit(cmdq, wqe, cons_idx);
+}
+
+static void cmdq_async_cmd_handler(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ int addr_sz = sizeof(u64);
+
+ hinic_be32_to_cpu((void *)&buf, addr_sz);
+ if (buf)
+ hinic_free_cmd_buf(hwdev, (struct hinic_cmd_buf *)buf);
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ struct hinic_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd;
+ struct hinic_ctrl *ctrl = &wqe_scmd->ctrl;
+ u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ return -EBUSY;
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+
+ return 0;
+}
+
+#define HINIC_CMDQ_WQE_HEAD_LEN 32
+static void hinic_dump_cmdq_wqe_head(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_wqe *wqe)
+{
+ u32 i;
+ u32 *data = (u32 *)wqe;
+
+ for (i = 0; i < (HINIC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) {
+ sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ data[i], data[i + 1], data[i + 2],
+ data[i + 3]);/*lint !e679*/
+ }
+}
+
+void hinic_cmdq_ceq_handler(void *handle, u32 ceqe_data)
+{
+ struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)handle)->cmdqs;
+ enum hinic_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE);
+ struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
+ struct hinic_hwdev *hwdev = cmdqs->hwdev;
+ struct hinic_cmdq_wqe *wqe;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_ctrl *ctrl;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ u32 ctrl_info;
+ u16 ci;
+ int set_arm = 1;
+
+ while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+
+ if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
+ set_arm = 1;
+ break;
+ } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT ||
+ cmd_info->cmd_type == HINIC_CMD_TYPE_FAKE_TIMEOUT) {
+ if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT) {
+ sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ hinic_dump_cmdq_wqe_head(hwdev, wqe);
+ }
+
+ set_arm = 1;
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) {
+ /* arm_bit was set until here */
+ set_arm = 0;
+
+ if (cmdq_arm_ceq_handler(cmdq, wqe, ci))
+ break;
+ } else {
+ set_arm = 1;
+
+ /* only arm bit is using scmd wqe, the wqe is lcmd */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ break;
+
+ if (cmdq_type == HINIC_CMDQ_ASYNC)
+ cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+ else
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+ }
+
+ if (set_arm)
+ hinic_set_arm_bit(hwdev, HINIC_SET_ARM_CMDQ, cmdq_type);
+}
+
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+ struct hinic_hwdev *hwdev = cmdqs->hwdev;
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+ u16 start_ci = (u16)wq->cons_idx;
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+ pfn = CMDQ_PFN(wq_first_page_paddr);
+
+ ctxt_info->curr_wqe_page_pfn =
+ CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+ CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+ /* If only use one page, use 0-level CLA */
+ if (cmdq->wq->num_q_pages != 1) {
+ cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+ CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+
+ cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
+}
+
+bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 curr_pg_pfn, wq_block_pfn;
+
+ if (cmdq_ctxt->ppf_idx != hinic_ppf_idx(hwdev) ||
+ cmdq_ctxt->cmdq_id > HINIC_MAX_CMDQ_TYPES)
+ return false;
+
+ curr_pg_pfn = CMDQ_CTXT_PAGE_INFO_GET(ctxt_info->curr_wqe_page_pfn,
+ CURR_WQE_PAGE_PFN);
+ wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_GET(ctxt_info->wq_block_pfn,
+ WQ_BLOCK_PFN);
+ /* VF must use 0-level CLA */
+ if (curr_pg_pfn != wq_block_pfn)
+ return false;
+
+ return true;
+}
+
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
+ struct hinic_wq *wq, enum hinic_cmdq_type q_type)
+{
+ void __iomem *db_base;
+ int err = 0;
+
+ cmdq->wq = wq;
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+ err = -ENOMEM;
+ goto cmd_infos_err;
+ }
+
+ err = hinic_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err)
+ goto alloc_db_err;
+
+ cmdq->db_base = (u8 *)db_base;
+ return 0;
+
+alloc_db_err:
+ kfree(cmdq->cmd_infos);
+
+cmd_infos_err:
+ spin_lock_deinit(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
+{
+ hinic_free_db_addr(hwdev, cmdq->db_base, NULL);
+ kfree(cmdq->cmd_infos);
+ spin_lock_deinit(&cmdq->cmdq_lock);
+}
+
+int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0};
+ enum hinic_cmdq_type cmdq_type;
+ u16 in_size;
+ u16 out_size = sizeof(*cmdq_ctxt);
+ int err;
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ in_size = sizeof(*cmdq_ctxt);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET,
+ cmdq_ctxt, in_size,
+ &cmdq_ctxt_out, &out_size, 0);
+ if (err || !out_size || cmdq_ctxt_out.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, cmdq_ctxt_out.status, out_size);
+ return -EFAULT;
+ }
+ }
+
+ cmdqs->status |= HINIC_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+static void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq)
+{
+ struct hinic_cmdq_wqe *wqe;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ u16 ci;
+
+ while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
+ if (cmdq->cmdq_type == HINIC_CMDQ_ASYNC) {
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM)
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ else
+ cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+ } else {
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+ }
+}
+
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type cmdq_type;
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic_set_cmdq_ctxts(hwdev);
+}
+
+int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt;
+ enum hinic_cmdq_type type, cmdq_type;
+ size_t saved_wqs_size;
+ u32 max_wqe_size;
+ int err;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+
+ saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
+ cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
+ if (!cmdqs->saved_wqs) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n");
+ err = -ENOMEM;
+ goto alloc_wqs_err;
+ }
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev->dev_hdl,
+ HINIC_CMDQ_BUF_SIZE,
+ HINIC_CMDQ_BUF_SIZE, 0ULL);
+ if (!cmdqs->cmd_buf_pool) {
+ sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n");
+ err = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE);
+ err = hinic_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ hwdev->dev_hdl, HINIC_MAX_CMDQ_TYPES,
+ hwdev->wq_page_size, CMDQ_WQEBB_SIZE,
+ HINIC_CMDQ_DEPTH, max_wqe_size);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n");
+ goto cmdq_alloc_err;
+ }
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
+ &cmdqs->saved_wqs[cmdq_type], cmdq_type);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n",
+ cmdq_type);
+ goto init_cmdq_err;
+ }
+
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type],
+ &cmdqs->cmdq_pages, cmdq_ctxt);
+ }
+
+ err = hinic_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto init_cmdq_err;
+
+ return 0;
+
+init_cmdq_err:
+ type = HINIC_CMDQ_SYNC;
+ for (; type < cmdq_type; type++)
+ free_cmdq(hwdev, &cmdqs->cmdq[type]);
+
+ hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+cmdq_alloc_err:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+pool_create_err:
+ kfree(cmdqs->saved_wqs);
+
+alloc_wqs_err:
+ kfree(cmdqs);
+
+ return err;
+}
+
+void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
+
+ cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]);
+ free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
+ }
+
+ hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+ kfree(cmdqs->saved_wqs);
+
+ kfree(cmdqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
new file mode 100644
index 000000000000..eee7e6e3352c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CMDQ_H_
+#define HINIC_CMDQ_H_
+
+#define HINIC_DB_OFF 0x00000800
+
+#define HINIC_SCMD_DATA_LEN 16
+
+#define HINIC_CMDQ_DEPTH 4096
+
+#define HINIC_CMDQ_BUF_SIZE 2048U
+#define HINIC_CMDQ_BUF_HW_RSVD 8
+#define HINIC_CMDQ_MAX_DATA_SIZE \
+ (HINIC_CMDQ_BUF_SIZE - HINIC_CMDQ_BUF_HW_RSVD)
+
+enum hinic_cmdq_type {
+ HINIC_CMDQ_SYNC,
+ HINIC_CMDQ_ASYNC,
+ HINIC_MAX_CMDQ_TYPES,
+};
+
+enum hinic_db_src_type {
+ HINIC_DB_SRC_CMDQ_TYPE,
+ HINIC_DB_SRC_L2NIC_SQ_TYPE,
+};
+
+enum hinic_cmdq_db_type {
+ HINIC_DB_SQ_RQ_TYPE,
+ HINIC_DB_CMDQ_TYPE,
+};
+
+/* CMDQ WQE CTRLS */
+struct hinic_cmdq_header {
+ u32 header_info;
+ u32 saved_data;
+};
+
+struct hinic_scmd_bufdesc {
+ u32 buf_len;
+ u32 rsvd;
+ u8 data[HINIC_SCMD_DATA_LEN];
+};
+
+struct hinic_lcmd_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd1;
+ u64 saved_async_buf;
+ u64 rsvd3;
+};
+
+struct hinic_cmdq_db {
+ u32 db_info;
+ u32 rsvd;
+};
+
+struct hinic_status {
+ u32 status_info;
+};
+
+struct hinic_ctrl {
+ u32 ctrl_info;
+};
+
+struct hinic_sge_resp {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_cmdq_completion {
+ /* HW Format */
+ union {
+ struct hinic_sge_resp sge_resp;
+ u64 direct_resp;
+ };
+};
+
+struct hinic_cmdq_wqe_scmd {
+ struct hinic_cmdq_header header;
+ struct hinic_cmdq_db db;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_scmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_wqe_lcmd {
+ struct hinic_cmdq_header header;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_lcmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_inline_wqe {
+ struct hinic_cmdq_wqe_scmd wqe_scmd;
+};
+
+struct hinic_cmdq_wqe {
+ /* HW Format */
+ union {
+ struct hinic_cmdq_inline_wqe inline_wqe;
+ struct hinic_cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+struct hinic_cmdq_arm_bit {
+ u32 q_type;
+ u32 q_id;
+};
+
+struct hinic_cmdq_ctxt_info {
+ u64 curr_wqe_page_pfn;
+ u64 wq_block_pfn;
+};
+
+struct hinic_cmdq_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 cmdq_id;
+ u8 ppf_idx;
+
+ u8 rsvd1[4];
+
+ struct hinic_cmdq_ctxt_info ctxt_info;
+};
+
+enum hinic_cmdq_status {
+ HINIC_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic_cmdq_cmd_type {
+ HINIC_CMD_TYPE_NONE,
+ HINIC_CMD_TYPE_SET_ARM,
+ HINIC_CMD_TYPE_DIRECT_RESP,
+ HINIC_CMD_TYPE_SGE_RESP,
+ HINIC_CMD_TYPE_ASYNC,
+ HINIC_CMD_TYPE_TIMEOUT,
+ HINIC_CMD_TYPE_FAKE_TIMEOUT,
+};
+
+struct hinic_cmdq_cmd_info {
+ enum hinic_cmdq_cmd_type cmd_type;
+
+ struct completion *done;
+ int *errcode;
+ int *cmpt_code;
+ u64 *direct_resp;
+ u64 cmdq_msg_id;
+};
+
+struct hinic_cmdq {
+ struct hinic_wq *wq;
+
+ enum hinic_cmdq_type cmdq_type;
+ int wrapped;
+
+ /* spinlock for send cmdq commands */
+ spinlock_t cmdq_lock;
+
+ /* doorbell area */
+ u8 __iomem *db_base;
+
+ struct hinic_cmdq_ctxt cmdq_ctxt;
+
+ struct hinic_cmdq_cmd_info *cmd_infos;
+
+ struct hinic_hwdev *hwdev;
+};
+
+struct hinic_cmdqs {
+ struct hinic_hwdev *hwdev;
+
+ struct dma_pool *cmd_buf_pool;
+
+ struct hinic_wq *saved_wqs;
+
+ struct hinic_cmdq_pages cmdq_pages;
+ struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
+
+ u32 status;
+ u32 disable_flag;
+};
+
+void hinic_cmdq_ceq_handler(void *hwdev, u32 ceqe_data);
+
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq);
+
+int hinic_cmdqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
new file mode 100644
index 000000000000..7948e0d1ade9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CSR_H
+#define HINIC_CSR_H
+
+#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000
+
+/* HW interface registers */
+#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
+#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
+#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
+#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80
+
+#define HINIC_ELECTION_BASE 0x200
+
+#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4
+#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \
+ (HINIC_CSR_DMA_ATTR_TBL_BASE \
+ + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE)
+
+#define HINIC_PPF_ELECTION_STRIDE 0x4
+#define HINIC_CSR_MAX_PORTS 4
+#define HINIC_CSR_PPF_ELECTION_ADDR \
+ (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE)
+
+#define HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR \
+ (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE + \
+ HINIC_CSR_MAX_PORTS * HINIC_PPF_ELECTION_STRIDE)
+
+/* MSI-X registers */
+#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
+#define HINIC_CSR_MSIX_CNT_BASE 0x2004
+
+#define HINIC_CSR_MSIX_STRIDE 0x8
+
+#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
+ (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
+ (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+/* EQ registers */
+#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
+#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
+
+#define HINIC_EQ_MTT_OFF_STRIDE 0x40
+
+#define HINIC_CSR_AEQ_MTT_OFF(id) \
+ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_MTT_OFF(id) \
+ (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)((type == HINIC_AEQ) ? \
+ HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \
+ HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)((type == HINIC_AEQ) ? \
+ HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \
+ HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
+#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
+#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08
+#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C
+
+#define HINIC_EQ_OFF_STRIDE 0x80
+
+#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
+ (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
+ (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
+#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
+#define HINIC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008
+#define HINIC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C
+
+#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
+ (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
+ (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+/* API CMD registers */
+#define HINIC_CSR_API_CMD_BASE 0xF000
+
+#define HINIC_CSR_API_CMD_STRIDE 0x100
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+/* VF control registers in pf */
+#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400
+#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4
+
+#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C
+
+#define HINIC_ICPL_RESERVD_ADDR 0x9204
+
+#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \
+ (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE)
+
+#define HINIC_IPSU_CHANNEL_NUM 7
+#define HINIC_IPSU_CHANNEL0_ADDR 0x404
+#define HINIC_IPSU_CHANNEL_OFFSET 0x14
+#define HINIC_IPSU_DIP_OFFSET 13
+#define HINIC_IPSU_SIP_OFFSET 14
+#define HINIC_IPSU_DIP_SIP_MASK \
+ ((0x1 << HINIC_IPSU_SIP_OFFSET) | (0x1 << HINIC_IPSU_DIP_OFFSET))
+
+/* For multi-host mgmt
+ * 0x75C0: bit0~3: uP write, host mode is bmwg or normal host
+ * bit4~7: master host ppf write when function initializing
+ * bit8~23: only for slave host PXE
+ * 0x75C4: slave host status
+ * bit0~7: host 0~7 functions status
+ */
+#define HINIC_HOST_MODE_ADDR 0x75C0
+#define HINIC_MULT_HOST_SLAVE_STATUS_ADDR 0x75C4
+
+#endif
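
To make the layout above concrete, here is a minimal userspace sketch (illustrative only, not part of this patch) that expands one of the stride-based macros by hand; the values follow directly from the #defines in this header:

#include <stdio.h>

#define HINIC_CSR_API_CMD_BASE			0xF000
#define HINIC_CSR_API_CMD_STRIDE		0x100
#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx)	\
	(HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)

int main(void)
{
	/* chain 0 -> 0xF014, chain 1 -> 0xF114: each chain owns a
	 * 0x100-byte register window above HINIC_CSR_API_CMD_BASE
	 */
	printf("0x%X 0x%X\n",
	       HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(0),
	       HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(1));
	return 0;
}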
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
new file mode 100644
index 000000000000..0b61e22501b5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/******************************************************************************
+ *
+ * Copyright (C), 2001-2011, Huawei Tech. Co., Ltd.
+ *
+ ******************************************************************************
+ File Name : hinic_ctx_def.h
+ Version : Initial Draft
+ Author : Qu Huichun
+ Created : 2018/5/31
+ Last Modified :
+ Description : Commands between NIC and uP
+ Function List :
+ History :
+ 1.Date : 2018/5/31
+ Author : Qu Huichun
+ Modification: Created file
+
+******************************************************************************/
+
+#ifndef __HINIC_CTX_DEF_H__
+#define __HINIC_CTX_DEF_H__
+
+#ifdef __cplusplus
+ #if __cplusplus
+extern "C"{
+ #endif
+#endif /* __cplusplus */
+
+#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask)
+
+#define HINIC_CEQE_QN_MASK 0x3FFU
+
+#define HINIC_Q_CTXT_MAX 42
+
+#define MAX_WQE_SIZE(max_sge, wqebb_size) \
+ (((max_sge) <= 2) ? (wqebb_size) : \
+ ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size)))
+
+/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */
+#define HINIC_CI_Q_ADDR_SIZE (64)
+
+#define CI_TABLE_SIZE(num_qps, pg_sz) \
+ (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))
+
+#define HINIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + \
+ (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define HINIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + \
+ (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define Q_CTXT_SIZE 48
+#define TSO_LRO_CTXT_SIZE 240
+
+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+ + (q_id) * Q_CTXT_SIZE)
+
+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+ + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)
+
+#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+ + (num_sqs) * sizeof(struct hinic_sq_ctxt)))
+
+#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+ + (num_rqs) * sizeof(struct hinic_rq_ctxt)))
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU
+#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U
+
+#define SQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \
+ SQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define SQ_CTXT_CI_IDX_SHIFT 11
+#define SQ_CTXT_CI_OWNER_SHIFT 23
+
+#define SQ_CTXT_CI_IDX_MASK 0xFFFU
+#define SQ_CTXT_CI_OWNER_MASK 0x1U
+
+#define SQ_CTXT_CI_SET(val, member) (((val) & \
+ SQ_CTXT_CI_##member##_MASK) \
+ << SQ_CTXT_CI_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU
+
+#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
+ SQ_CTXT_WQ_PAGE_##member##_MASK) \
+ << SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define SQ_CTXT_PREF_CI_SHIFT 20
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define SQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define SQ_CTXT_PREF_SET(val, member) (((val) & \
+ SQ_CTXT_PREF_##member##_MASK) \
+ << SQ_CTXT_PREF_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
+ SQ_CTXT_WQ_BLOCK_##member##_MASK) \
+ << SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1
+
+#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U
+
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \
+ RQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define RQ_CTXT_PI_IDX_SHIFT 0
+#define RQ_CTXT_PI_INTR_SHIFT 22
+#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31
+
+#define RQ_CTXT_PI_IDX_MASK 0xFFFU
+#define RQ_CTXT_PI_INTR_MASK 0x3FFU
+#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U
+
+#define RQ_CTXT_PI_SET(val, member) (((val) & \
+ RQ_CTXT_PI_##member##_MASK) << \
+ RQ_CTXT_PI_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU
+
+#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
+ RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define RQ_CTXT_PREF_CI_SHIFT 20
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define RQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define RQ_CTXT_PREF_SET(val, member) (((val) & \
+ RQ_CTXT_PREF_##member##_MASK) << \
+ RQ_CTXT_PREF_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
+ RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4)
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+enum sq_cflag {
+ CFLAG_DATA_PATH = 0,
+};
+
+enum hinic_qp_ctxt_type {
+ HINIC_QP_CTXT_TYPE_SQ,
+ HINIC_QP_CTXT_TYPE_RQ,
+};
+
+/* service type relates define */
+enum cfg_svc_type_en {
+ CFG_SVC_NIC_BIT0 = (1 << 0),
+ CFG_SVC_ROCE_BIT1 = (1 << 1),
+ CFG_SVC_FCOE_BIT2 = (1 << 2),
+ CFG_SVC_TOE_BIT3 = (1 << 3),
+ CFG_SVC_IWARP_BIT4 = (1 << 4),
+ CFG_SVC_FC_BIT5 = (1 << 5),
+
+ CFG_SVC_FIC_BIT6 = (1 << 6),
+ CFG_SVC_OVS_BIT7 = (1 << 7),
+ CFG_SVC_ACL_BIT8 = (1 << 8),
+ CFG_SVC_IOE_BIT9 = (1 << 9),
+
+ CFG_SVC_FT_EN = (CFG_SVC_FCOE_BIT2 | CFG_SVC_TOE_BIT3 |
+ CFG_SVC_FC_BIT5 | CFG_SVC_IOE_BIT9),
+ CFG_SVC_RDMA_EN = (CFG_SVC_ROCE_BIT1 | CFG_SVC_IWARP_BIT4)
+};
+
+#define IS_NIC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)
+#define IS_ROCE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ROCE_BIT1)
+#define IS_FCOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FCOE_BIT2)
+#define IS_TOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_TOE_BIT3)
+#define IS_IWARP_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IWARP_BIT4)
+#define IS_FC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FC_BIT5)
+#define IS_FIC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FIC_BIT6)
+#define IS_OVS_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_OVS_BIT7)
+#define IS_ACL_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ACL_BIT8)
+#define IS_IOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IOE_BIT9)
+#define IS_FT_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FT_EN)
+#define IS_RDMA_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_RDMA_EN)
+
+#ifdef __cplusplus
+ #if __cplusplus
+}
+ #endif
+#endif /* __cplusplus */
+#endif /* __HINIC_CTX_DEF_H__ */
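
All the *_SET macros in this header share the same token-pasting pattern, so one example carries over to the rest; a small userspace sketch (illustrative only, not part of this patch) packing two SQ context fields:

#include <stdio.h>

typedef unsigned int u32;

#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT	8
#define SQ_CTXT_CEQ_ATTR_EN_SHIFT	23
#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK	0x1FU
#define SQ_CTXT_CEQ_ATTR_EN_MASK	0x1U

#define SQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \
	SQ_CTXT_CEQ_ATTR_##member##_MASK) \
	<< SQ_CTXT_CEQ_ATTR_##member##_SHIFT)

int main(void)
{
	/* ceq id 3 lands in bits 8..12, the enable bit at bit 23,
	 * giving 0x00800300
	 */
	u32 attr = SQ_CTXT_CEQ_ATTR_SET(3, CEQ_ID) |
		   SQ_CTXT_CEQ_ATTR_SET(1, EN);

	printf("0x%08X\n", attr);
	return 0;
}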
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h
new file mode 100644
index 000000000000..bfc6aa4440af
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_DBG_H_
+#define HINIC_NIC_DBG_H_
+
+u16 hinic_dbg_get_qp_num(void *hwdev);
+
+void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id);
+
+u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id);
+
+u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id);
+
+int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr,
+ u64 *phy_addr, u32 *pg_idx);
+
+u16 hinic_dbg_get_global_qpn(void *hwdev);
+
+int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size);
+
+int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size);
+
+int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data);
+
+int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data, u16 mask);
+
+int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value);
+
+int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value);
+
+int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value);
+
+int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 *value);
+
+int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 value);
+
+int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id,
+ u64 *value1, u64 *value2);
+
+int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id,
+ u64 value1, u64 value2);
+
+int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
+
+int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
+
+int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val);
+
+int hinic_dbg_get_hw_stats(void *hwdev, u8 *hw_stats, u16 *out_size);
+
+u16 hinic_dbg_clear_hw_stats(void *hwdev);
+
+void hinic_get_chip_fault_stats(void *hwdev, u8 *chip_fault_stats, int offset);
+
+int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit);
+
+#endif
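
As a usage illustration for the debug API above, a hedged sketch of reading one 32-bit SM counter (not part of this patch; the node, instance and counter id values are placeholders a real caller would obtain from the device layer):

static int example_read_sm_counter(void *hwdev)
{
	u32 value = 0;
	int err;

	/* placeholders: node 0, instance 0, counter 0 */
	err = hinic_sm_ctr_rd32(hwdev, 0, 0, 0, &value);
	if (err)
		return err;

	pr_info("sm counter: %u\n", value);
	return 0;
}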
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
new file mode 100644
index 000000000000..efb17418b15b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/******************************************************************************
+ *
+ * Copyright (C), 2001-2011, Huawei Tech. Co., Ltd.
+ *
+ ******************************************************************************
+ File Name : hinic_dfx_def.h
+ Version : Initial Draft
+ Author : Qu Huichun
+ Created : 2018/6/1
+ Last Modified :
+ Description : Commands between NIC and uP
+ Function List :
+ History :
+ 1.Date : 2018/6/1
+ Author : Qu Huichun
+ Modification: Created file
+
+******************************************************************************/
+
+#ifndef __HINIC_DFX_DEF_H__
+#define __HINIC_DFX_DEF_H__
+
+#ifdef __cplusplus
+ #if __cplusplus
+extern "C"{
+ #endif
+#endif /* __cplusplus */
+
+enum module_name {
+ SEND_TO_NIC_DRIVER = 1,
+ SEND_TO_HW_DRIVER,
+ SEND_TO_UCODE,
+ SEND_TO_UP,
+ SEND_TO_SM,
+
+ HINICADM_OVS_DRIVER = 6,
+ HINICADM_ROCE_DRIVER,
+ HINICADM_TOE_DRIVER,
+ HINICADM_IWAP_DRIVER,
+ HINICADM_FC_DRIVER,
+ HINICADM_FCOE_DRIVER,
+};
+
+enum driver_cmd_type {
+ TX_INFO = 1,
+ Q_NUM,
+ TX_WQE_INFO,
+ TX_MAPPING,
+ RX_INFO,
+ RX_WQE_INFO,
+ RX_CQE_INFO,
+ UPRINT_FUNC_EN,
+ UPRINT_FUNC_RESET,
+ UPRINT_SET_PATH,
+ UPRINT_GET_STATISTICS,
+ FUNC_TYPE,
+ GET_FUNC_IDX,
+ GET_INTER_NUM,
+ CLOSE_TX_STREAM,
+ GET_DRV_VERSION,
+ CLEAR_FUNC_STASTIC,
+ GET_HW_STATS,
+ CLEAR_HW_STATS,
+ GET_SELF_TEST_RES,
+ GET_CHIP_FAULT_STATS,
+ GET_NUM_COS,
+ SET_COS_UP_MAP,
+ GET_COS_UP_MAP,
+ GET_CHIP_ID,
+ GET_SINGLE_CARD_INFO,
+ GET_FIRMWARE_ACTIVE_STATUS,
+ ROCE_DFX_FUNC,
+ GET_DEVICE_ID,
+ GET_PF_DEV_INFO,
+ CMD_FREE_MEM,
+ GET_LOOPBACK_MODE = 32,
+ SET_LOOPBACK_MODE,
+ SET_LINK_MODE,
+ SET_PF_BW_LIMIT,
+ GET_PF_BW_LIMIT,
+ ROCE_CMD,
+ GET_POLL_WEIGHT,
+ SET_POLL_WEIGHT,
+ GET_HOMOLOGUE,
+ SET_HOMOLOGUE,
+ GET_SSET_COUNT,
+ GET_SSET_ITEMS,
+ IS_DRV_IN_VM,
+ LRO_ADPT_MGMT,
+ SET_INTER_COAL_PARAM,
+ GET_INTER_COAL_PARAM,
+ GET_CHIP_INFO,
+ GET_NIC_STATS_LEN,
+ GET_NIC_STATS_STRING,
+ GET_NIC_STATS_INFO,
+ GET_PF_ID,
+
+ RSS_CFG = 0x40,
+ RSS_INDIR,
+ PORT_ID,
+
+ GET_WIN_STAT = 0x60,
+ WIN_CSR_READ = 0x61,
+ WIN_CSR_WRITE = 0x62,
+ WIN_API_CMD_RD = 0x63
+};
+
+enum hinic_nic_link_mode {
+ HINIC_LINK_MODE_AUTO = 0,
+ HINIC_LINK_MODE_UP,
+ HINIC_LINK_MODE_DOWN,
+ HINIC_LINK_MODE_MAX
+};
+
+enum api_chain_cmd_type {
+ API_CSR_READ,
+ API_CSR_WRITE
+};
+
+enum sm_cmd_type {
+ SM_CTR_RD32 = 1,
+ SM_CTR_RD64_PAIR,
+ SM_CTR_RD64
+};
+
+enum hinic_show_set {
+ HINIC_SHOW_SSET_IO_STATS = 1,
+};
+
+#define HINIC_SHOW_ITEM_LEN 32
+struct hinic_show_item {
+ char name[HINIC_SHOW_ITEM_LEN];
+ u8 hexadecimal; /* 0: decimal, 1: hexadecimal */
+ u8 rsvd[7];
+ u64 value;
+};
+
+#define UP_UPDATEFW_TIME_OUT_VAL 20000U
+#define UCODE_COMP_TIME_OUT_VAL 0xFF00000
+#define NIC_TOOL_MAGIC 'x'
+
+#ifdef __cplusplus
+ #if __cplusplus
+}
+ #endif
+#endif /* __cplusplus */
+#endif /* __HINIC_DFX_DEF_H__ */
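
For the hinic_show_item layout above, a brief sketch (illustrative only, not part of this patch; the item name is made up) of how a stats producer might fill one entry:

static void example_fill_item(struct hinic_show_item *item, u64 val)
{
	snprintf(item->name, HINIC_SHOW_ITEM_LEN, "%s", "rx_packets");
	item->hexadecimal = 0;	/* report the value in decimal */
	item->value = val;
}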
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
new file mode 100644
index 000000000000..c025f3e80e8a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
@@ -0,0 +1,1302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_csr.h"
+#include "hinic_eqs.h"
+
+#define HINIC_EQS_WQ_NAME "hinic_eqs"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
+#define AEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define AEQ_CTRL_0_SET(val, member) \
+ (((val) & AEQ_CTRL_0_##member##_MASK) << \
+ AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_0_##member##_MASK \
+ << AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT 0
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define AEQ_CTRL_1_SET(val, member) \
+ (((val) & AEQ_CTRL_1_##member##_MASK) << \
+ AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_1_##member##_MASK \
+ << AEQ_CTRL_1_##member##_SHIFT)))
+
+#define HINIC_EQ_PROD_IDX_MASK 0xFFFFF
+#define HINIC_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC_EQ_UPDATE_CI_STEP 64
+
+static uint g_aeq_len = HINIC_DEFAULT_AEQ_LEN;
+module_param(g_aeq_len, uint, 0444);
+MODULE_PARM_DESC(g_aeq_len,
+ "aeq depth, valid range is " __stringify(HINIC_MIN_AEQ_LEN)
+ " - " __stringify(HINIC_MAX_AEQ_LEN));
+
+static uint g_ceq_len = HINIC_DEFAULT_CEQ_LEN;
+module_param(g_ceq_len, uint, 0444);
+MODULE_PARM_DESC(g_ceq_len,
+ "ceq depth, valid range is " __stringify(HINIC_MIN_CEQ_LEN)
+ " - " __stringify(HINIC_MAX_CEQ_LEN));
+
+static uint g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT;
+module_param(g_num_ceqe_in_tasklet, uint, 0444);
+MODULE_PARM_DESC(g_num_ceqe_in_tasklet,
+ "The max number of ceqe can be processed in tasklet, default = 1024");
+
+#define CEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20
+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
+#define CEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define CEQ_CTRL_0_SET(val, member) \
+ (((val) & CEQ_CTRL_0_##member##_MASK) << \
+ CEQ_CTRL_0_##member##_SHIFT)
+
+#define CEQ_CTRL_1_LEN_SHIFT 0
+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define CEQ_CTRL_1_SET(val, member) \
+ (((val) & CEQ_CTRL_1_##member##_MASK) << \
+ CEQ_CTRL_1_##member##_SHIFT)
+
+#define EQ_ELEM_DESC_TYPE_SHIFT 0
+#define EQ_ELEM_DESC_SRC_SHIFT 7
+#define EQ_ELEM_DESC_SIZE_SHIFT 8
+#define EQ_ELEM_DESC_WRAPPED_SHIFT 31
+
+#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
+#define EQ_ELEM_DESC_SRC_MASK 0x1U
+#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
+#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U
+
+#define EQ_ELEM_DESC_GET(val, member) \
+ (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
+ EQ_ELEM_DESC_##member##_MASK)
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT 0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT 31
+
+#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U
+
+#define EQ_CONS_IDX_SET(val, member) \
+ (((val) & EQ_CONS_IDX_##member##_MASK) << \
+ EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member) \
+ ((val) & (~(EQ_CONS_IDX_##member##_MASK \
+ << EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \
+ ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size) \
+ ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \
+ (size)) / (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size)
+
+#define GET_EQ_ELEMENT(eq, idx) \
+ (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+ (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *)\
+ GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)
+
+#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx)
+
+#define PAGE_IN_4K(page_size) ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \
+ ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT 0
+#define CEQ_DMA_ATTR_DEFAULT 0
+
+#define CEQ_LMT_KICK_DEFAULT 0
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define EQ_WRAPPED_SHIFT 20
+
+#define EQ_VALID_SHIFT 31
+
+#define CEQE_TYPE_SHIFT 23
+#define CEQE_TYPE_MASK 0x7
+
+#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \
+ CEQE_TYPE_MASK)
+
+#define CEQE_DATA_MASK 0x3FFFFFF
+#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK)
+
+#define EQ_MIN_PAGE_SIZE 0x1000U
+#define aeq_to_aeqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+#define ceq_to_ceqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
+
+static irqreturn_t aeq_interrupt(int irq, void *data);
+static irqreturn_t ceq_interrupt(int irq, void *data);
+static void ceq_tasklet(ulong ceq_data);
+
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+ u8 checksum = 0;
+ u8 idx;
+
+ for (idx = 0; idx < 32; idx += 4)
+ checksum ^= ((val >> idx) & 0xF);
+
+ return checksum & 0xF;
+}
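
The checksum folds the 32-bit consumer-index dword into a single nibble by XOR; set_eq_cons_idx() below then stores that nibble into the XOR_CHKSUM field, so the nibbles of the final dword presumably XOR to zero for a cheap hardware check. A minimal userspace self-check (illustrative only, not part of this patch):

#include <assert.h>

static unsigned char checksum4(unsigned int val)
{
	unsigned char checksum = 0;
	unsigned char idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= (val >> idx) & 0xF;

	return checksum & 0xF;
}

int main(void)
{
	/* nibbles of 0x00F00021 are 0,0,F,0,0,0,2,1; F^2^1 = 0xC */
	assert(checksum4(0x00F00021) == 0xC);
	return 0;
}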
+
+/**
+ * hinic_aeq_register_hw_cb - register aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @hwe_cb: callback function
+ **/
+int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
+ hinic_aeq_hwe_cb hwe_cb)
+{
+ struct hinic_aeqs *aeqs;
+
+ if (!hwdev || !hwe_cb || event >= HINIC_MAX_AEQ_EVENTS)
+ return -EINVAL;
+
+ aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+ aeqs->aeq_hwe_cb[event] = hwe_cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_hw_cb);
+
+/**
+ * hinic_aeq_unregister_hw_cb - unregister the aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ **/
+void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event)
+{
+ struct hinic_aeqs *aeqs;
+
+ if (!hwdev)
+ return;
+
+ aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+ if (event < HINIC_MAX_AEQ_EVENTS)
+ aeqs->aeq_hwe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_hw_cb);
+
+/**
+ * hinic_aeq_register_swe_cb - register aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ * @aeq_swe_cb: callback function
+ **/
+int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
+ hinic_aeq_swe_cb aeq_swe_cb)
+{
+ struct hinic_aeqs *aeqs;
+
+ if (!hwdev || !aeq_swe_cb || event >= HINIC_MAX_AEQ_SW_EVENTS)
+ return -EINVAL;
+
+ aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+ aeqs->aeq_swe_cb[event] = aeq_swe_cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_swe_cb);
+
+/**
+ * hinic_aeq_unregister_swe_cb - unregister the aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ **/
+void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event)
+{
+ struct hinic_aeqs *aeqs;
+
+ if (!hwdev)
+ return;
+
+ aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+ if (event < HINIC_MAX_AEQ_SW_EVENTS)
+ aeqs->aeq_swe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_swe_cb);
+
+/**
+ * hinic_ceq_register_cb - register ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @callback: callback function
+ **/
+int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
+ hinic_ceq_event_cb callback)
+{
+ struct hinic_ceqs *ceqs;
+
+ if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+ return -EINVAL;
+
+ ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+ ceqs->ceq_cb[event] = callback;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_ceq_register_cb);
+
+/**
+ * hinic_ceq_unregister_cb - unregister ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ **/
+void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event)
+{
+ struct hinic_ceqs *ceqs;
+
+ if (!hwdev)
+ return;
+
+ ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+ if (event < HINIC_MAX_CEQ_EVENTS)
+ ceqs->ceq_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_ceq_unregister_cb);
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: the event queue to update the cons idx for
+ * @arm_state: indicate whether to arm the eq interrupt
+ **/
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+ u32 eq_wrap_ci, val;
+ u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+ eq_wrap_ci = EQ_CONS_IDX(eq);
+
+ /* other fields are reserved, set to 0 */
+ val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+ EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+ val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+ hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - handle for the ceq events
+ * @ceqs: ceqs part of the chip
+ * @ceqe: ceq element of the event
+ **/
+static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
+{
+ struct hinic_hwdev *hwdev = ceqs->hwdev;
+ enum hinic_ceq_event event = CEQE_TYPE(ceqe);
+ u32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HINIC_MAX_CEQ_EVENTS) {
+ sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe date: 0x%x\n",
+ event, ceqe_data);
+ return;
+ }
+
+ if (ceqs->ceq_cb[event])
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+}
+
+/**
+ * aeq_irq_handler - handler for the aeq event
+ * @eq: the async event queue of the event
+ **/
+static bool aeq_irq_handler(struct hinic_eq *eq)
+{
+ struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
+ struct hinic_aeq_elem *aeqe_pos;
+ enum hinic_aeq_type event;
+ enum hinic_aeq_sw_type sw_event;
+ enum hinic_ucode_event_type ucode_event;
+ u64 aeqe_data;
+ u32 aeqe_desc;
+ u32 i, eqe_cnt = 0;
+ u8 size;
+ u8 lev;
+
+ for (i = 0; i < HINIC_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+
+ /* Data in HW is in Big endian Format */
+ aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+ /* HW updates wrapped bit, when it adds eq element event */
+ if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
+ return false;
+
+ event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+ if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+ ucode_event = event;
+ /* SW event uses only the first 8B */
+ sw_event = ucode_event >= HINIC_NIC_FATAL_ERROR_MAX ?
+ HINIC_STATEFULL_EVENT :
+ HINIC_STATELESS_EVENT;
+ aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data));
+ if (aeqs->aeq_swe_cb[sw_event]) {
+ lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev,
+ ucode_event,
+ aeqe_data);
+ hinic_swe_fault_handler(aeqs->hwdev, lev,
+ ucode_event, aeqe_data);
+ }
+ } else {
+ if (event < HINIC_MAX_AEQ_EVENTS) {
+ size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+ if (aeqs->aeq_hwe_cb[event])
+ aeqs->aeq_hwe_cb[event](aeqs->hwdev,
+ aeqe_pos->aeqe_data, size);
+ } else {
+ sdk_warn(eq->hwdev->dev_hdl,
+ "Unknown aeq hw event %d\n", event);
+ }
+ }
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ceq_irq_handler - handler for the ceq event
+ * @eq: the completion event queue of the event
+ **/
+static bool ceq_irq_handler(struct hinic_eq *eq)
+{
+ struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
+ u32 ceqe, eqe_cnt = 0;
+ u32 i;
+
+ for (i = 0; i < g_num_ceqe_in_tasklet; i++) {
+ ceqe = *(GET_CURR_CEQ_ELEM(eq));
+ ceqe = be32_to_cpu(ceqe);
+
+ /* HW updates wrapped bit, when it adds eq element event */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return false;
+
+ ceq_event_handler(ceqs, ceqe);
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+ }
+ }
+
+ return true;
+}
+
+static void reschedule_eq_handler(struct hinic_eq *eq)
+{
+ if (eq->type == HINIC_AEQ) {
+ struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
+ struct workqueue_struct *workq = aeqs->workq;
+ struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+ queue_work(workq, &aeq_work->work);
+ } else {
+ tasklet_schedule(&eq->ceq_tasklet);
+ }
+}
+
+/**
+ * eq_irq_handler - handler for the eq event
+ * @data: the event queue of the event
+ **/
+static bool eq_irq_handler(void *data)
+{
+ struct hinic_eq *eq = (struct hinic_eq *)data;
+ bool uncompleted;
+
+ if (eq->type == HINIC_AEQ)
+ uncompleted = aeq_irq_handler(eq);
+ else
+ uncompleted = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, uncompleted ? HINIC_EQ_NOT_ARMED : HINIC_EQ_ARMED);
+
+ return uncompleted;
+}
+
+static struct hinic_eq *find_eq(struct hinic_hwdev *hwdev, int msix_entry_idx)
+{
+ struct hinic_aeqs *aeqs = hwdev->aeqs;
+ struct hinic_ceqs *ceqs = hwdev->ceqs;
+ int i;
+
+ for (i = 0; i < aeqs->num_aeqs; i++) {
+ struct hinic_eq *eq = &aeqs->aeq[i];
+
+ if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
+ return eq;
+ }
+
+ for (i = 0; i < ceqs->num_ceqs; i++) {
+ struct hinic_eq *eq = &ceqs->ceq[i];
+
+ if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
+ return eq;
+ }
+
+ return NULL;
+}
+
+/* for windows */
+bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx)
+{
+ struct hinic_eq *eq;
+
+ eq = find_eq(hwdev, msix_entry_idx);
+ if (!eq) {
+ pr_err("Can't find eq in eq interrupt handler\n");
+ return false;
+ }
+
+ return eq_irq_handler(eq);
+}
+
+/**
+ * eq_irq_work - eq work for the event
+ * @work: the work that is associated with the eq
+ **/
+static void eq_irq_work(struct work_struct *work)
+{
+ struct hinic_eq_work *aeq_work =
+ container_of(work, struct hinic_eq_work, work);
+
+ if (eq_irq_handler(aeq_work->data))
+ reschedule_eq_handler(aeq_work->data);
+}
+
+/**
+ * aeq_interrupt - aeq interrupt handler
+ * @irq: irq number
+ * @data: the async event queue of the event
+ **/
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct hinic_eq *aeq = (struct hinic_eq *)data;
+ struct hinic_hwdev *hwdev = aeq->hwdev;
+
+ struct hinic_aeqs *aeqs = aeq_to_aeqs(aeq);
+ struct workqueue_struct *workq = aeqs->workq;
+ struct hinic_eq_work *aeq_work;
+
+ /* clear resend timer cnt register */
+ hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+
+ aeq_work = &aeq->aeq_work;
+ aeq_work->data = aeq;
+
+ queue_work(workq, &aeq_work->work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ceq_tasklet - ceq tasklet for the event
+ * @ceq_data: data that will be used by the tasklet (ceq)
+ **/
+static void ceq_tasklet(ulong ceq_data)
+{
+ struct hinic_ceq_tasklet_data *ceq_tasklet_data =
+ (struct hinic_ceq_tasklet_data *)ceq_data;
+ struct hinic_eq *eq = (struct hinic_eq *)ceq_tasklet_data->data;
+
+ eq->soft_intr_jif = jiffies;
+
+ if (eq_irq_handler(ceq_tasklet_data->data))
+ reschedule_eq_handler(ceq_tasklet_data->data);
+}
+
+/**
+ * ceq_interrupt - ceq interrupt handler
+ * @irq: irq number
+ * @data: the completion event queue of the event
+ **/
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic_eq *ceq = (struct hinic_eq *)data;
+ struct hinic_ceq_tasklet_data *ceq_tasklet_data;
+
+ ceq->hard_intr_jif = jiffies;
+
+ /* clear resend timer counters */
+ hinic_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+
+ ceq_tasklet_data = &ceq->ceq_tasklet_data;
+ ceq_tasklet_data->data = data;
+ tasklet_schedule(&ceq->ceq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+struct hinic_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
+static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+ u16 in_size = sizeof(ceq_ctrl);
+ u16 out_size = sizeof(ceq_ctrl);
+ int err;
+
+ ceq_ctrl.func_id = hinic_global_func_id(hwdev);
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, 0);
+ if (err || !out_size || ceq_ctrl.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x,
out_size: 0x%x\n",
+ q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * set_eq_ctrls - set the ctrl registers of the eq
+ * @eq: the event queue to set
+ **/
+static int set_eq_ctrls(struct hinic_eq *eq)
+{
+ enum hinic_eq_type type = eq->type;
+ struct hinic_hwif *hwif = eq->hwdev->hwif;
+ struct irq_info *eq_irq = &eq->eq_irq;
+ u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+ u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
+ int err;
+
+ if (type == HINIC_AEQ) {
+ /* set ctrl0 */
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+ AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+ AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+ AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+ ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+ val |= ctrl0;
+
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ /* set ctrl1 */
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+ elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl1);
+
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
+ CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ceq_elements_init - initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: value to initialize the elements with
+ **/
+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+ u32 i;
+ u32 *ceqe;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = GET_CEQ_ELEM(eq, i);
+ *(ceqe) = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the init values */
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value to initialize the elements with
+ **/
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+ struct hinic_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = GET_AEQ_ELEM(eq, i);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the init values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ **/
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwif *hwif = eq->hwdev->hwif;
+ u32 init_val;
+ u64 dma_addr_size, virt_addr_size;
+ u16 pg_num, i;
+ u32 reg;
+ int err;
+ u8 flag = 0;
+
+ dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+ virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+ eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+ if (!eq->dma_addr)
+ return -ENOMEM;
+
+ eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+ if (!eq->virt_addr) {
+ err = -ENOMEM;
+ goto virt_addr_alloc_err;
+ }
+
+ eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL);
+ if (!eq->dma_addr_for_free) {
+ err = -ENOMEM;
+ goto dma_addr_free_alloc_err;
+ }
+
+ eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL);
+ if (!eq->virt_addr_for_free) {
+ err = -ENOMEM;
+ goto virt_addr_free_alloc_err;
+ }
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+ eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent
+ (eq->hwdev->dev_hdl, eq->page_size,
+ &eq->dma_addr_for_free[pg_num], GFP_KERNEL);
+ if (!eq->virt_addr_for_free[pg_num]) {
+ err = -ENOMEM;
+ goto dma_alloc_err;
+ }
+
+ eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num];
+ eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num];
+ if (eq->dma_addr_for_free[pg_num] & (eq->page_size - 1)) {
+ sdk_warn(eq->hwdev->dev_hdl,
+ "Address is not aligned to %u-bytes as hardware required\n",
+ eq->page_size);
+ sdk_warn(eq->hwdev->dev_hdl, "Change eq's page size %u\n",
+ ((eq->page_size) >> 1));
+ eq->dma_addr[pg_num] = ALIGN
+ (eq->dma_addr_for_free[pg_num],
+ (u64)((eq->page_size) >> 1));
+ eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] +
+ ((u64)eq->dma_addr[pg_num]
+ - (u64)eq->dma_addr_for_free[pg_num]);
+ flag = 1;
+ }
+ reg = HINIC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+ hinic_hwif_write_reg(hwif, reg,
+ upper_32_bits(eq->dma_addr[pg_num]));
+
+ reg = HINIC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+ hinic_hwif_write_reg(hwif, reg,
+ lower_32_bits(eq->dma_addr[pg_num]));
+ }
+
+ if (flag) {
+ eq->page_size = eq->page_size >> 1;
+ eq->eq_len = eq->eq_len >> 1;
+ }
+
+ eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size);
+ if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+ sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of
2\n");
+ err = -EINVAL;
+ goto dma_alloc_err;
+ }
+ init_val = EQ_WRAPPED(eq);
+
+ if (eq->type == HINIC_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+
+ return 0;
+
+dma_alloc_err:
+ for (i = 0; i < pg_num; i++)
+ dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size,
+ eq->virt_addr_for_free[i],
+ eq->dma_addr_for_free[i]);
+ kfree(eq->virt_addr_for_free);
+virt_addr_free_alloc_err:
+ kfree(eq->dma_addr_for_free);
+dma_addr_free_alloc_err:
+ kfree(eq->virt_addr);
+virt_addr_alloc_err:
+ kfree(eq->dma_addr);
+ return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ **/
+static void free_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 pg_num;
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
+ dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size,
+ eq->virt_addr_for_free[pg_num],
+ eq->dma_addr_for_free[pg_num]);
+
+ kfree(eq->virt_addr_for_free);
+ kfree(eq->dma_addr_for_free);
+ kfree(eq->virt_addr);
+ kfree(eq->dma_addr);
+}
+
+static inline u32 get_page_size(struct hinic_eq *eq)
+{
+ u32 total_size;
+ u16 count, n = 0;
+
+ total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE);
+
+ if (total_size <= (HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE))
+ return EQ_MIN_PAGE_SIZE;
+
+ count = (u16)(ALIGN((total_size / HINIC_EQ_MAX_PAGES),
+ EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE);
+
+ if (!(count & (count - 1)))
+ return EQ_MIN_PAGE_SIZE * count;
+
+ while (count) {
+ count >>= 1;
+ n++;
+ }
+
+ return EQ_MIN_PAGE_SIZE << n;
+}
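
To see the sizing at work: the default CEQ holds 0x10000 entries of 4-byte elements, i.e. 256 KiB, which exceeds HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE = 32 KiB, so count = ALIGN(256 KiB / 8, 4 KiB) / 4 KiB = 8; since 8 is a power of two, the function returns a 32 KiB page size and the queue fits in exactly 8 pages. A compact restatement of that arithmetic (illustrative only, not part of this patch):

#include <assert.h>

#define EQ_MIN_PAGE_SIZE	0x1000U
#define HINIC_EQ_MAX_PAGES	8
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int total = ALIGN_UP(0x10000 * 4, EQ_MIN_PAGE_SIZE);
	unsigned int count = ALIGN_UP(total / HINIC_EQ_MAX_PAGES,
				      EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE;

	/* 256 KiB total, 8 pages of 32 KiB each */
	assert(total > HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE);
	assert(!(count & (count - 1)));
	assert(count * EQ_MIN_PAGE_SIZE == 0x8000);
	return 0;
}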
+
+/**
+ * init_eq - initialize eq
+ * @eq: the event queue
+ * @hwdev: pointer to hw device
+ * @q_id: queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, aeq or ceq
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ **/
+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hinic_eq_type type, struct irq_info *entry)
+{
+ int err = 0;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* Clear PI and CI, also clear the ARM bit */
+ hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
+ hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ eq->elem_size = (type == HINIC_AEQ) ?
+ HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+
+ eq->page_size = get_page_size(eq);
+ eq->orig_page_size = eq->page_size;
+ eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+ if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+ sdk_err(hwdev->dev_hdl, "Number pages:%d too many pages for eq\n",
+ eq->num_pages);
+ return -EINVAL;
+ }
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+ eq->eq_irq.irq_id = entry->irq_id;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+ goto init_eq_ctrls_err;
+ }
+
+ set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+ if (type == HINIC_AEQ) {
+ struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+ INIT_WORK(&aeq_work->work, eq_irq_work);
+ } else {
+ tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
+ (ulong)(&eq->ceq_tasklet_data));
+ }
+
+ if (type == HINIC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic_aeq%d@pci:%s", eq->q_id,
+ pci_name(hwdev->pcidev_hdl));
+ err = request_irq(entry->irq_id, aeq_interrupt, 0UL,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic_ceq%d@pci:%s", eq->q_id,
+ pci_name(hwdev->pcidev_hdl));
+ err = request_irq(entry->irq_id, ceq_interrupt, 0UL,
+ eq->irq_name, eq);
+ }
+
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+ err);
+ goto req_irq_err;
+ }
+
+ hinic_set_msix_state(hwdev, entry->msix_entry_idx, HINIC_MSIX_ENABLE);
+
+ return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+ free_eq_pages(eq);
+ return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ **/
+static void remove_eq(struct hinic_eq *eq)
+{
+ struct irq_info *entry = &eq->eq_irq;
+
+ hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ synchronize_irq(entry->irq_id);
+ free_irq(entry->irq_id, eq);
+
+ if (eq->type == HINIC_AEQ) {
+ struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+ cancel_work_sync(&aeq_work->work);
+
+ /* clear eq_len to avoid hw access host memory */
+ hinic_hwif_write_reg(eq->hwdev->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+ } else {
+ tasklet_kill(&eq->ceq_tasklet);
+
+ set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update cons_idx to avoid invalid interrupt */
+ eq->cons_idx = hinic_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+ free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: pointer to hw device
+ * @num_aeqs: number of AEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries)
+{
+ struct hinic_aeqs *aeqs;
+ int err;
+ u16 i, q_id;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+
+ aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
+ if (!aeqs->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto create_work_err;
+ }
+
+ if (g_aeq_len < HINIC_MIN_AEQ_LEN || g_aeq_len > HINIC_MAX_AEQ_LEN) {
+ sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range,
resetting to %d\n",
+ g_aeq_len, HINIC_DEFAULT_AEQ_LEN);
+ g_aeq_len = HINIC_DEFAULT_AEQ_LEN;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len,
+ HINIC_AEQ, &msix_entries[q_id]);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n",
+ q_id);
+ goto init_aeq_err;
+ }
+ }
+
+ return 0;
+
+init_aeq_err:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&aeqs->aeq[i]);
+
+ destroy_workqueue(aeqs->workq);
+
+create_work_err:
+ kfree(aeqs);
+
+ return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @hwdev: pointer to hw device
+ **/
+void hinic_aeqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_aeqs *aeqs = hwdev->aeqs;
+ enum hinic_aeq_type aeq_event = HINIC_HW_INTER_INT;
+ enum hinic_aeq_sw_type sw_aeq_event = HINIC_STATELESS_EVENT;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
+ remove_eq(&aeqs->aeq[q_id]);
+
+ for (; sw_aeq_event < HINIC_MAX_AEQ_SW_EVENTS; sw_aeq_event++)
+ hinic_aeq_unregister_swe_cb(hwdev, sw_aeq_event);
+
+ for (; aeq_event < HINIC_MAX_AEQ_EVENTS; aeq_event++)
+ hinic_aeq_unregister_hw_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+/**
+ * hinic_ceqs_init - init all the ceqs
+ * @hwdev: pointer to hw device
+ * @num_ceqs: number of CEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs,
+ struct irq_info *msix_entries)
+{
+ struct hinic_ceqs *ceqs;
+ int err;
+ u16 i, q_id;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ if (g_ceq_len < HINIC_MIN_CEQ_LEN || g_ceq_len > HINIC_MAX_CEQ_LEN) {
+ sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range,
resetting to %d\n",
+ g_ceq_len, HINIC_DEFAULT_CEQ_LEN);
+ g_ceq_len = HINIC_DEFAULT_CEQ_LEN;
+ }
+
+ if (!g_num_ceqe_in_tasklet) {
+ sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be
zero, resetting to %d\n",
+ HINIC_TASK_PROCESS_EQE_LIMIT);
+ g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT;
+ }
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len,
+ HINIC_CEQ, &msix_entries[q_id]);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n",
+ q_id);
+ goto init_ceq_err;
+ }
+ }
+
+ return 0;
+
+init_ceq_err:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&ceqs->ceq[i]);
+
+ kfree(ceqs);
+
+ return err;
+}
+
+/**
+ * hinic_ceqs_free - free all the ceqs
+ * @hwdev: pointer to hw device
+ **/
+void hinic_ceqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_ceqs *ceqs = hwdev->ceqs;
+ enum hinic_ceq_event ceq_event = HINIC_CMDQ;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
+ remove_eq(&ceqs->ceq[q_id]);
+
+ for (; ceq_event < HINIC_MAX_CEQ_EVENTS; ceq_event++)
+ hinic_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
+
+void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs)
+{
+ struct hinic_ceqs *ceqs = hwdev->ceqs;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id;
+ irqs[q_id].msix_entry_idx =
+ ceqs->ceq[q_id].eq_irq.msix_entry_idx;
+ }
+
+ *num_irqs = ceqs->num_ceqs;
+}
+
+void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs)
+{
+ struct hinic_aeqs *aeqs = hwdev->aeqs;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id;
+ irqs[q_id].msix_entry_idx =
+ aeqs->aeq[q_id].eq_irq.msix_entry_idx;
+ }
+
+ *num_irqs = aeqs->num_aeqs;
+}
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_eq *eq;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+ eq = &hwdev->aeqs->aeq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%x, pi: 0x%x\n",
+ q_id, ci, pi);
+ }
+}
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_eq *eq;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) {
+ eq = &hwdev->ceqs->ceq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%x, sw_ci: 0x%x, pi: 0x%x\n",
+ q_id, ci, eq->cons_idx, pi);
+ sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n",
+ jiffies_to_msecs(jiffies - eq->hard_intr_jif));
+ sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n",
+ jiffies_to_msecs(jiffies - eq->soft_intr_jif));
+ }
+}
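
For context, a hedged sketch of how a consumer of this file hooks a CEQ event through the callback API exported above (not part of this patch; the handler name and body are placeholders):

static void my_cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
	/* handle is the hwdev passed at registration; ceqe_data carries
	 * the 26-bit CEQE payload extracted by CEQE_DATA()
	 */
}

static int example_hook_cmdq_ceq(void *hwdev)
{
	return hinic_ceq_register_cb(hwdev, HINIC_CMDQ,
				     my_cmdq_ceq_handler);
}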
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
new file mode 100644
index 000000000000..bc87090479a9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_EQS_H
+#define HINIC_EQS_H
+
+#define HINIC_EQ_PAGE_SIZE 0x00001000
+
+#define HINIC_MAX_AEQS 4
+#define HINIC_MAX_CEQS 32
+
+#define HINIC_EQ_MAX_PAGES 8
+
+#define HINIC_AEQE_SIZE 64
+#define HINIC_CEQE_SIZE 4
+
+#define HINIC_AEQE_DESC_SIZE 4
+#define HINIC_AEQE_DATA_SIZE \
+ (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN 0x10000
+#define HINIC_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC_MIN_AEQ_LEN 64
+#define HINIC_MAX_AEQ_LEN (512 * 1024)
+#define HINIC_MIN_CEQ_LEN 64
+#define HINIC_MAX_CEQ_LEN (1024 * 1024)
+
+#define HINIC_CEQ_ID_CMDQ 0
+
+#define EQ_IRQ_NAME_LEN 64
+
+enum hinic_eq_type {
+ HINIC_AEQ,
+ HINIC_CEQ
+};
+
+enum hinic_eq_intr_mode {
+ HINIC_INTR_MODE_ARMED,
+ HINIC_INTR_MODE_ALWAYS,
+};
+
+enum hinic_eq_ci_arm_state {
+ HINIC_EQ_NOT_ARMED,
+ HINIC_EQ_ARMED,
+};
+
+struct hinic_eq_work {
+ struct work_struct work;
+ void *data;
+};
+
+struct hinic_ceq_tasklet_data {
+ void *data;
+};
+
+struct hinic_eq {
+ struct hinic_hwdev *hwdev;
+ u16 q_id;
+ enum hinic_eq_type type;
+ u32 page_size;
+ u32 orig_page_size;
+ u32 eq_len;
+
+ u32 cons_idx;
+ u16 wrapped;
+
+ u16 elem_size;
+ u16 num_pages;
+ u32 num_elem_in_pg;
+
+ struct irq_info eq_irq;
+ char irq_name[EQ_IRQ_NAME_LEN];
+
+ dma_addr_t *dma_addr;
+ u8 **virt_addr;
+ dma_addr_t *dma_addr_for_free;
+ u8 **virt_addr_for_free;
+
+ struct hinic_eq_work aeq_work;
+ struct tasklet_struct ceq_tasklet;
+ struct hinic_ceq_tasklet_data ceq_tasklet_data;
+
+ u64 hard_intr_jif;
+ u64 soft_intr_jif;
+};
+
+struct hinic_aeq_elem {
+ u8 aeqe_data[HINIC_AEQE_DATA_SIZE];
+ u32 desc;
+};
+
+struct hinic_aeqs {
+ struct hinic_hwdev *hwdev;
+
+ hinic_aeq_hwe_cb aeq_hwe_cb[HINIC_MAX_AEQ_EVENTS];
+
+ hinic_aeq_swe_cb aeq_swe_cb[HINIC_MAX_AEQ_SW_EVENTS];
+
+ struct hinic_eq aeq[HINIC_MAX_AEQS];
+ u16 num_aeqs;
+
+ struct workqueue_struct *workq;
+};
+
+struct hinic_ceqs {
+ struct hinic_hwdev *hwdev;
+
+ hinic_ceq_event_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
+ void *ceq_data[HINIC_MAX_CEQ_EVENTS];
+
+ struct hinic_eq ceq[HINIC_MAX_CEQS];
+ u16 num_ceqs;
+};
+
+int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries);
+
+void hinic_aeqs_free(struct hinic_hwdev *hwdev);
+
+int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs,
+ struct irq_info *msix_entries);
+
+void hinic_ceqs_free(struct hinic_hwdev *hwdev);
+
+void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+#endif
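
A hedged bring-up sketch using the declarations above (not part of this patch): AEQs are initialized before CEQs, since setting the CEQ ctrl registers goes through a mgmt message whose response arrives on an AEQ; how the caller obtains the struct irq_info arrays is left to the hwdev layer:

static int example_eqs_init(struct hinic_hwdev *hwdev,
			    struct irq_info *aeq_irqs, u16 num_aeqs,
			    struct irq_info *ceq_irqs, u16 num_ceqs)
{
	int err;

	err = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
	if (err)
		return err;

	err = hinic_ceqs_init(hwdev, num_ceqs, ceq_irqs);
	if (err)
		hinic_aeqs_free(hwdev);

	return err;
}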
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
new file mode 100644
index 000000000000..8f0cec464328
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HW_H_
+#define HINIC_HW_H_
+
+#ifndef __BIG_ENDIAN__
+#define __BIG_ENDIAN__ 0x4321
+#endif
+
+#ifndef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__ 0x1234
+#endif
+
+#ifdef __BYTE_ORDER__
+#undef __BYTE_ORDER__
+#endif
+/* Assume a little-endian (x86) host */
+#define __BYTE_ORDER__ __LITTLE_ENDIAN__
+
+enum hinic_mod_type {
+ HINIC_MOD_COMM = 0, /* HW communication module */
+ HINIC_MOD_L2NIC = 1, /* L2NIC module*/
+ HINIC_MOD_ROCE = 2,
+ HINIC_MOD_IWARP = 3,
+ HINIC_MOD_TOE = 4,
+ HINIC_MOD_FLR = 5,
+ HINIC_MOD_FCOE = 6,
+ HINIC_MOD_CFGM = 7, /* Configuration module */
+ HINIC_MOD_CQM = 8,
+ HINIC_MOD_VSWITCH = 9,
+ HINIC_MOD_FC = 10,
+ HINIC_MOD_OVS = 11,
+ HINIC_MOD_FIC = 12,
+ HINIC_MOD_MIGRATE = 13,
+ HINIC_MOD_HILINK = 14,
+ HINIC_MOD_HW_MAX = 16, /* hardware max module id */
+
+ /* Software module id, for PF/VF and multi-host */
+ HINIC_MOD_SW_FUNC = 17,
+ HINIC_MOD_MAX,
+};
+
+struct hinic_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+enum hinic_ack_type {
+ HINIC_ACK_TYPE_CMDQ,
+ HINIC_ACK_TYPE_SHARE_CQN,
+ HINIC_ACK_TYPE_APP_CQN,
+
+	HINIC_MOD_ACK_MAX = 15,
+};
+
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
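+/* Illustrative sketch only (the command layout is an assumption): querying
+ * the PPF state synchronously over the management channel.
+ *
+ *	struct hinic_ppf_state ppf_state = {0};
+ *	u16 out_size = sizeof(ppf_state);
+ *	int err;
+ *
+ *	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ *				     HINIC_MGMT_CMD_GET_PPF_STATE,
+ *				     &ppf_state, sizeof(ppf_state),
+ *				     &ppf_state, &out_size, 0);
+ *	if (err || !out_size || ppf_state.status)
+ *		err = -EFAULT;
+ */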
+
+/* for PXE, OVS */
+int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+/* PF/VF sends a message to the uP via API cmd and returns immediately */
+int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+
+int hinic_mbox_to_vf(void *hwdev, enum hinic_mod_type mod,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_api_cmd_write_nack(void *hwdev, u8 dest,
+ void *cmd, u16 size);
+
+int hinic_api_cmd_read_ack(void *hwdev, u8 dest,
+ void *cmd, u16 size, void *ack, u16 ack_size);
+/* PF/VF sends a command to the microcode via cmdq and returns on success.
+ * timeout=0 means use the default timeout.
+ */
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout);
+/* Notes: 1. whether the timeout parameter is needed is still an open
+ * question; 2. out_param indicates the status of the microcode's
+ * processing of the command.
+ */
+
+/* PF/VF sends a command to the microcode via cmdq and returns the detailed
+ * result. timeout=0 means use the default timeout.
+ */
+int hinic_cmdq_detail_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out, u32 timeout);
+
+/* PF/VF sends a command to the microcode via cmdq and returns immediately */
+int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in);
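+/* Illustrative sketch only: a typical cmdq direct-response call. The buffer
+ * contents depend entirely on the command; cmd and cmd_len are placeholders.
+ *
+ *	struct hinic_cmd_buf *cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ *	u64 out_param = 0;
+ *	int err;
+ *
+ *	if (!cmd_buf)
+ *		return -ENOMEM;
+ *	cmd_buf->size = cmd_len;	(fill cmd_buf->buf first)
+ *	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ *				     HINIC_MOD_L2NIC, cmd, cmd_buf,
+ *				     &out_param, 0);
+ *	hinic_free_cmd_buf(hwdev, cmd_buf);
+ */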
+
+int hinic_ppf_tmr_start(void *hwdev);
+int hinic_ppf_tmr_stop(void *hwdev);
+
+/* for Windows */
+bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx);
+
+enum hinic_ceq_event {
+ HINIC_NON_L2NIC_SCQ,
+ HINIC_NON_L2NIC_ECQ,
+ HINIC_NON_L2NIC_NO_CQ_EQ,
+ HINIC_CMDQ,
+ HINIC_L2NIC_SQ,
+ HINIC_L2NIC_RQ,
+ HINIC_MAX_CEQ_EVENTS,
+};
+
+typedef void (*hinic_ceq_event_cb)(void *handle, u32 ceqe_data);
+int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
+ hinic_ceq_event_cb callback);
+void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event);
+
+enum hinic_aeq_type {
+ HINIC_HW_INTER_INT = 0,
+ HINIC_MBX_FROM_FUNC = 1,
+ HINIC_MSG_FROM_MGMT_CPU = 2,
+ HINIC_API_RSP = 3,
+ HINIC_API_CHAIN_STS = 4,
+ HINIC_MBX_SEND_RSLT = 5,
+ HINIC_MAX_AEQ_EVENTS
+};
+
+enum hinic_aeq_sw_type {
+ HINIC_STATELESS_EVENT = 0,
+ HINIC_STATEFULL_EVENT = 1,
+ HINIC_MAX_AEQ_SW_EVENTS
+};
+
+typedef void (*hinic_aeq_hwe_cb)(void *handle, u8 *data, u8 size);
+int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
+ hinic_aeq_hwe_cb hwe_cb);
+void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event);
+
+typedef u8 (*hinic_aeq_swe_cb)(void *handle, u8 event, u64 data);
+int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
+ hinic_aeq_swe_cb aeq_swe_cb);
+void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event);
+
+typedef void (*hinic_mgmt_msg_cb)(void *hwdev, void *pri_handle,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size);
+
+int hinic_register_mgmt_msg_cb(void *hwdev,
+ enum hinic_mod_type mod, void *pri_handle,
+ hinic_mgmt_msg_cb callback);
+void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod);
+
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev);
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *buf);
+
+int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base);
+void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base);
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base,
+ void __iomem *dwqe_base);
+
+struct nic_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limt;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic_get_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info *interrupt_info);
+
+int hinic_set_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info interrupt_info);
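+/* Illustrative sketch only: read-modify-write of one MSI-X entry's
+ * coalescing settings (the written values are arbitrary examples).
+ *
+ *	struct nic_interrupt_info info = { .msix_index = idx };
+ *	int err;
+ *
+ *	err = hinic_get_interrupt_cfg(hwdev, &info);
+ *	if (!err) {
+ *		info.coalesc_timer_cfg = 7;
+ *		info.pending_limt = 2;
+ *		err = hinic_set_interrupt_cfg(hwdev, info);
+ *	}
+ */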
+
+/* The driver code implementation interface*/
+void hinic_misx_intr_clear_resend_bit(void *hwdev,
+ u16 msix_idx, u8 clear_resend_en);
+
+struct hinic_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr);
+
+int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz);
+int hinic_clean_root_ctxt(void *hwdev);
+void hinic_record_pcie_error(void *hwdev);
+
+int hinic_func_rx_tx_flush(void *hwdev);
+
+int hinic_func_tmr_bitmap_set(void *hwdev, bool enable);
+
+struct hinic_init_para {
+	/* Records the hinic_pcidev or NDIS_Adapter pointer */
+	void *adapter_hdl;
+	/* Records the pcidev or handler pointer
+	 * (e.g. the ioremap interface input parameter)
+	 */
+	void *pcidev_hdl;
+	/* Records the pcidev->dev or handler pointer, used for DMA address
+	 * allocation or as the dev_err print parameter
+	 */
+ void *dev_hdl;
+
+	void *cfg_reg_base;	/* configuration register virtual address, BAR0/1 */
+	/* interrupt configuration register address, BAR2/3 */
+	void *intr_reg_base;
+	u64 db_base_phy;
+	void *db_base;	/* doorbell address, upper 4M of the BAR4/5 space */
+	void *dwqe_mapping;	/* direct WQE 4M, follows the doorbell space */
+	void **hwdev;
+	void *chip_node;
+	/* On a bmgw x86 host the driver can't send messages to the mgmt CPU
+	 * directly; they must be forwarded through the PPF mailbox to the
+	 * bmgw ARM host.
+	 */
+ void *ppf_hwdev;
+};
+
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+#define MAX_FUNCTION_NUM 512
+#define HINIC_MAX_PF_NUM 16
+#define HINIC_MAX_COS 8
+#define INIT_FAILED 0
+#define INIT_SUCCESS 1
+#define MAX_DRV_BUF_SIZE 4096
+
+struct card_node {
+ struct list_head node;
+ struct list_head func_list;
+ char chip_name[IFNAMSIZ];
+ void *log_info;
+ void *dbgtool_info;
+ void *func_handle_array[MAX_FUNCTION_NUM];
+ unsigned char dp_bus_num;
+ u8 func_num;
+ struct attribute dbgtool_attr_file;
+
+ bool cos_up_setted;
+ u8 cos_up[HINIC_MAX_COS];
+ bool ppf_state;
+ u8 pf_bus_num[HINIC_MAX_PF_NUM];
+ bool disable_vf_load[HINIC_MAX_PF_NUM];
+ u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM];
+ u32 vf_mbx_rand_id[MAX_FUNCTION_NUM];
+};
+
+enum hinic_hwdev_init_state {
+ HINIC_HWDEV_NONE_INITED,
+ HINIC_HWDEV_AEQ_INITED,
+ HINIC_HWDEV_MGMT_INITED,
+ HINIC_HWDEV_MBOX_INITED,
+ HINIC_HWDEV_CMDQ_INITED,
+ HINIC_HWDEV_COMM_CH_INITED,
+ HINIC_HWDEV_ALL_INITED,
+};
+
+enum hinic_func_mode {
+ FUNC_MOD_NORMAL_HOST,
+ FUNC_MOD_MULTI_BM_MASTER,
+ FUNC_MOD_MULTI_BM_SLAVE,
+ FUNC_MOD_MULTI_VM_MASTER,
+ FUNC_MOD_MULTI_VM_SLAVE,
+};
+
+enum hinic_func_cap {
+ /* send message to mgmt cpu directly */
+ HINIC_FUNC_MGMT = 1 << 0,
+ /* setting port attribute, pause/speed etc. */
+ HINIC_FUNC_PORT = 1 << 1,
+	/* Enable SR-IOV by default */
+	HINIC_FUNC_SRIOV_EN_DFLT = 1 << 2,
+	/* Can't change VF num */
+	HINIC_FUNC_SRIOV_NUM_FIX = 1 << 3,
+	/* Force PF/VF link up */
+ HINIC_FUNC_FORCE_LINK_UP = 1 << 4,
+ /* Support rate limit */
+ HINIC_FUNC_SUPP_RATE_LIMIT = 1 << 5,
+ HINIC_FUNC_SUPP_DFX_REG = 1 << 6,
+ /* Support promisc/multicast/all-multi */
+ HINIC_FUNC_SUPP_RX_MODE = 1 << 7,
+ /* Set vf mac and vlan by ip link */
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8,
+ /* Support set mac by ifconfig */
+ HINIC_FUNC_SUPP_CHANGE_MAC = 1 << 9,
+ /* OVS don't support SCTP_CRC/HW_VLAN/LRO */
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10,
+};
+
+#define FUNC_SUPPORT_MGMT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_MGMT))
+#define FUNC_SUPPORT_PORT_SETTING(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_PORT))
+#define FUNC_SUPPORT_DCB(hwdev) \
+ (FUNC_SUPPORT_PORT_SETTING(hwdev))
+#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SRIOV_EN_DFLT))
+#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SRIOV_NUM_FIX))
+#define FUNC_SUPPORT_RX_MODE(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_RX_MODE))
+#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_RATE_LIMIT))
+#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN))
+#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_CHANGE_MAC))
+#define FUNC_FORCE_LINK_UP(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_FORCE_LINK_UP))
+#define FUNC_SUPPORT_SCTP_CRC(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_HW_VLAN(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_LRO(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
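+/* Illustrative sketch only: these helpers are intended for feature gating
+ * in the service drivers, e.g.:
+ *
+ *	if (!FUNC_SUPPORT_CHANGE_MAC(hwdev))
+ *		return -EOPNOTSUPP;
+ */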
+
+/* Chip interface initialization: allocates the initialization interface
+ * control structure, obtains capabilities from the BAR space or the FW
+ * interface, and initializes MSI-X, the AEQs and the PF/uP communication
+ * module.
+ * adapter_hdl: records the hinic_pcidev or NDIS_Adapter pointer
+ * pcidev_hdl: records the pcidev or handler pointer
+ * dev_hdl: records the pcidev->dev or handler pointer, used for DMA address
+ * allocation or as the dev_err print parameter
+ */
+int hinic_init_hwdev(struct hinic_init_para *para);
+void hinic_free_hwdev(void *hwdev);
+void hinic_shutdown_hwdev(void *hwdev);
+
+void hinic_ppf_hwdev_unreg(void *hwdev);
+void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev);
+bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state);
+enum hinic_func_mode hinic_get_func_mode(void *hwdev);
+u64 hinic_get_func_feature_cap(void *hwdev);
+
+int hinic_slq_init(void *dev, int num_wqs);
+void hinic_slq_uninit(void *dev);
+int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth,
+ u16 page_size, u64 *cla_addr, void **handle);
+void hinic_slq_free(void *dev, void *handle);
+u64 hinic_slq_get_addr(void *handle, u16 index);
+u64 hinic_slq_get_first_pageaddr(void *handle);
+
+typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc);
+
+void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd);
+
+int hinic_micro_log_path_set(void *hwdev, u8 *log_path);
+int hinic_micro_log_func_en(void *hwdev, u8 is_en);
+
+/* defined by chip */
+enum hinic_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_PHY_FAULT,
+ FAULT_TYPE_MAX,
+};
+
+/* defined by chip */
+enum hinic_fault_err_level {
+ /* default err_level=FAULT_LEVEL_FATAL if
+ * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
+ * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
+ * FAULT_TYPE_UCODE
+ * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
+ */
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+enum hinic_fault_source_type {
+ /* same as FAULT_TYPE_CHIP */
+ HINIC_FAULT_SRC_HW_MGMT_CHIP = 0,
+ /* same as FAULT_TYPE_UCODE */
+ HINIC_FAULT_SRC_HW_MGMT_UCODE,
+ /* same as FAULT_TYPE_MEM_RD_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT,
+ /* same as FAULT_TYPE_MEM_WR_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT,
+ /* same as FAULT_TYPE_REG_RD_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT,
+ /* same as FAULT_TYPE_REG_WR_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT,
+ HINIC_FAULT_SRC_SW_MGMT_UCODE,
+ HINIC_FAULT_SRC_MGMT_WATCHDOG,
+ HINIC_FAULT_SRC_MGMT_RESET = 8,
+ HINIC_FAULT_SRC_HW_PHY_FAULT,
+ HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20,
+ HINIC_FAULT_SRC_TYPE_MAX,
+};
+
+struct hinic_fault_sw_mgmt {
+ u8 event_id;
+ u64 event_data;
+};
+
+union hinic_fault_hw_mgmt {
+ u32 val[4];
+ /* valid only type==FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ /* enum hinic_fault_err_level */
+ u8 err_level;
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+		/* func_id is valid only when
+		 * err_level == FAULT_LEVEL_SERIOUS_FLR
+		 */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only type==FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+
+ struct {
+ /* 0: read; 1: write */
+ u8 op_type;
+ u8 port_id;
+ u8 dev_ad;
+ u8 rsvd9;
+ u32 csr_addr;
+ u32 op_data;
+ u32 rsvd10;
+ } phy_fault;
+};
+
+/* defined by chip */
+struct hinic_fault_event {
+ /* enum hinic_fault_type */
+ u8 type;
+ u8 rsvd0[3];
+ union hinic_fault_hw_mgmt event;
+};
+
+struct hinic_fault_recover_info {
+ u8 fault_src; /* enum hinic_fault_source_type */
+ u8 fault_lev; /* enum hinic_fault_err_level */
+ u8 rsvd0[2];
+ union {
+ union hinic_fault_hw_mgmt hw_mgmt;
+ struct hinic_fault_sw_mgmt sw_mgmt;
+ u32 mgmt_rsvd[4];
+ u32 host_rsvd[4];
+ } fault_data;
+};
+
+struct hinic_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+enum port_module_event_type {
+ HINIC_PORT_MODULE_CABLE_PLUGGED,
+ HINIC_PORT_MODULE_CABLE_UNPLUGGED,
+ HINIC_PORT_MODULE_LINK_ERR,
+ HINIC_PORT_MODULE_MAX_EVENT,
+};
+
+struct hinic_port_module_event {
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+};
+
+struct hinic_event_link_info {
+ u8 valid;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+struct hinic_mctp_host_info {
+ u8 major_cmd;
+ u8 sub_cmd;
+ u8 rsvd[2];
+
+ u32 data_len;
+ void *data;
+};
+
+/* multi host mgmt event sub cmd */
+enum hinic_mhost_even_type {
+ HINIC_MHOST_NIC_STATE_CHANGE = 1,
+};
+
+struct hinic_mhost_nic_func_state {
+ u8 status;
+
+ u8 enable;
+ u16 func_idx;
+};
+
+struct hinic_multi_host_mgmt_event {
+ u16 sub_cmd;
+ u16 rsvd[3];
+
+ void *data;
+};
+
+enum hinic_event_type {
+ HINIC_EVENT_LINK_DOWN = 0,
+ HINIC_EVENT_LINK_UP = 1,
+ HINIC_EVENT_HEART_LOST = 2,
+ HINIC_EVENT_FAULT = 3,
+ HINIC_EVENT_NOTIFY_VF_DCB_STATE = 4,
+ HINIC_EVENT_DCB_STATE_CHANGE = 5,
+ HINIC_EVENT_FMW_ACT_NTC = 6,
+ HINIC_EVENT_PORT_MODULE_EVENT = 7,
+ HINIC_EVENT_MCTP_GET_HOST_INFO,
+ HINIC_EVENT_MULTI_HOST_MGMT,
+};
+
+struct hinic_event_info {
+ enum hinic_event_type type;
+ union {
+ struct hinic_event_link_info link_info;
+ struct hinic_fault_event info;
+ struct hinic_dcb_state dcb_state;
+ struct hinic_port_module_event module_event;
+ u8 vf_default_cos;
+ struct hinic_mctp_host_info mctp_info;
+ struct hinic_multi_host_mgmt_event mhost_mgmt;
+ };
+};
+
+enum hinic_ucode_event_type {
+ HINIC_INTERNAL_TSO_FATAL_ERROR = 0x0,
+ HINIC_INTERNAL_LRO_FATAL_ERROR = 0x1,
+ HINIC_INTERNAL_TX_FATAL_ERROR = 0x2,
+ HINIC_INTERNAL_RX_FATAL_ERROR = 0x3,
+ HINIC_INTERNAL_OTHER_FATAL_ERROR = 0x4,
+ HINIC_NIC_FATAL_ERROR_MAX = 0x8,
+};
+
+typedef void (*hinic_event_handler)(void *handle,
+ struct hinic_event_info *event);
+
+typedef void (*hinic_fault_recover_handler)(void *pri_handle,
+ struct hinic_fault_recover_info info);
+/* only register once */
+void hinic_event_register(void *dev, void *pri_handle,
+ hinic_event_handler callback);
+void hinic_event_unregister(void *dev);
+
+void hinic_detect_hw_present(void *hwdev);
+
+void hinic_set_chip_absent(void *hwdev);
+
+int hinic_get_chip_present_flag(void *hwdev);
+
+void hinic_set_pcie_order_cfg(void *handle);
+
+int hinic_get_mgmt_channel_status(void *handle);
+
+enum hinic_led_mode {
+ HINIC_LED_MODE_ON,
+ HINIC_LED_MODE_OFF,
+ HINIC_LED_MODE_FORCE_1HZ,
+ HINIC_LED_MODE_FORCE_2HZ,
+ HINIC_LED_MODE_FORCE_4HZ,
+ HINIC_LED_MODE_1HZ,
+ HINIC_LED_MODE_2HZ,
+ HINIC_LED_MODE_4HZ,
+ HINIC_LED_MODE_INVALID,
+};
+
+enum hinic_led_type {
+ HINIC_LED_TYPE_LINK,
+ HINIC_LED_TYPE_LOW_SPEED,
+ HINIC_LED_TYPE_HIGH_SPEED,
+ HINIC_LED_TYPE_INVALID,
+};
+
+int hinic_reset_led_status(void *hwdev, u8 port);
+int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type,
+ enum hinic_led_mode mode);
+
+struct hinic_board_info {
+ u32 board_type;
+ u32 port_num;
+ u32 port_speed;
+ u32 pcie_width;
+ u32 host_num;
+ u32 pf_num;
+ u32 vf_total_num;
+ u32 tile_num;
+ u32 qcm_num;
+ u32 core_num;
+ u32 work_mode;
+ u32 service_mode;
+ u32 pcie_mode;
+ u32 cfg_addr;
+ u32 boot_sel;
+ u32 board_id;
+};
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info);
+bool hinic_get_ppf_status(void *hwdev);
+
+struct hw_pf_info {
+ u16 glb_func_idx;
+ u16 glb_pf_vf_offset;
+ u8 p2p_idx;
+ u8 itf_idx;
+ u16 max_vfs;
+ u16 max_queue_num;
+ u16 ovs_q_vf_num[9];
+ u32 resv;
+};
+
+struct hinic_hw_pf_infos {
+ u8 num_pfs;
+ u8 rsvd1[3];
+
+ struct hw_pf_info infos[16];
+};
+
+int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos);
+int hinic_set_ip_check(void *hwdev, bool ip_check_ctl);
+int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+int hinic_mbox_ppf_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 func_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_get_card_present_state(void *hwdev, bool *card_present_state);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 320711e8dee6..45d181baccd2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -13,141 +13,534 @@
*
*/
-#ifndef HINIC_HW_MGMT_H
-#define HINIC_HW_MGMT_H
+#ifndef HINIC_HW_MGMT_H_
+#define HINIC_HW_MGMT_H_
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/bitops.h>
+/* Each service driver sees only its own capability structure
+ * (e.g. nic_service_cap, toe_service_cap), never the full service_cap.
+ */
+enum hinic_service_type {
+ SERVICE_T_NIC = 0,
+ SERVICE_T_OVS,
+ SERVICE_T_ROCE,
+ SERVICE_T_TOE,
+ SERVICE_T_IWARP,
+ SERVICE_T_FC,
+ SERVICE_T_FCOE,
+ SERVICE_T_MAX,
+
+	/* Only used for interrupt resource management,
+	 * to mark the requesting module
+	 */
+ SERVICE_T_INTF = (1 << 15),
+ SERVICE_T_CQM = (1 << 16),
+};
+
+/* NIC service capability
+ * 1. The chip supports at most 1K NIC RQs.
+ * 2. PF/VF RQ specifications:
+ *	RSS disabled:
+ *		VMDq disabled: each PF/VF has at most 8 RQs
+ *		VMDq enabled: each PF/VF has at most 1K RQs
+ *	RSS enabled:
+ *		VMDq disabled: each PF has at most 64 RQs, each VF at most 32
+ *		VMDq enabled: each PF/VF has at most 1K RQs
+ * 3. The chip supports at most 1K NIC SQs.
+ * 4. PF/VF SQ specifications: same pattern as the RQ specifications above.
+ */
+struct nic_service_cap {
+ /* PF resources*/
+ u16 max_sqs;
+ u16 max_rqs;
+
+	/* VF resources, obtained by the VF from its PF through the
+	 * mailbox mechanism
+	 */
+	u16 vf_max_sqs;
+	u16 vf_max_rqs;
+	bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+};
+
+struct dev_roce_svc_own_cap {
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_srqs;
+ u32 max_mpts;
+
+ u32 vf_max_qps;
+ u32 vf_max_cqs;
+ u32 vf_max_srqs;
+ u32 vf_max_mpts;
+
+ u32 cmtt_cl_start;
+ u32 cmtt_cl_end;
+ u32 cmtt_cl_sz;
+
+ u32 dmtt_cl_start;
+ u32 dmtt_cl_end;
+ u32 dmtt_cl_sz;
+
+ u32 wqe_cl_start;
+ u32 wqe_cl_end;
+ u32 wqe_cl_sz;
+
+ u32 qpc_entry_sz;
+ u32 max_wqes;
+ u32 max_rq_sg;
+ u32 max_sq_inline_data_sz;
+ u32 max_rq_desc_sz;
+
+ u32 rdmarc_entry_sz;
+ u32 max_qp_init_rdma;
+ u32 max_qp_dest_rdma;
+
+ u32 max_srq_wqes;
+ u32 reserved_srqs;
+ u32 max_srq_sge;
+ u32 srqc_entry_sz;
+
+ u32 max_msg_sz;/* Message size 2GB*/
+
+ u8 num_cos;
+};
+
+struct dev_iwarp_svc_own_cap {
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_mpts;
+
+ u32 vf_max_qps;
+ u32 vf_max_cqs;
+ u32 vf_max_mpts;
+
+ u32 cmtt_cl_start;
+ u32 cmtt_cl_end;
+ u32 cmtt_cl_sz;
+
+ u32 dmtt_cl_start;
+ u32 dmtt_cl_end;
+ u32 dmtt_cl_sz;
+
+ u32 wqe_cl_start;
+ u32 wqe_cl_end;
+ u32 wqe_cl_sz;
+
+ u32 max_rq_sg;
+ u32 max_sq_inline_data_sz;
+ u32 max_rq_desc_sz;
+
+ u32 max_irq_depth;
+ u32 irq_entry_size; /* 64B */
+ u32 max_orq_depth;
+ u32 orq_entry_size; /* 32B */
+ u32 max_rtoq_depth;
+ u32 rtoq_entry_size; /* 32B */
+ u32 max_ackq_depth;
+ u32 ackq_entry_size; /* 16B */
+
+ u32 max_msg_sz; /* Message size 1GB*/
+
+ u32 max_wqes; /* 8K */
+ u32 qpc_entry_sz; /* 1K */
+
+	/* true: CQM uses static allocation;
+	 * false: CQM uses dynamic allocation.
+	 * Currently only the QPC is considered.
+	 */
+ bool alloc_flag;
+
+ u8 num_cos;
+};
+
+/* RDMA service capability structure*/
+struct dev_rdma_svc_cap {
+ /* ROCE service unique parameter structure */
+ struct dev_roce_svc_own_cap roce_own_cap;
+ /* IWARP service unique parameter structure */
+ struct dev_iwarp_svc_own_cap iwarp_own_cap;
+};
+
+/* Defines the RDMA service capability flag */
+enum {
+ RDMA_BMME_FLAG_LOCAL_INV = (1 << 0),
+ RDMA_BMME_FLAG_REMOTE_INV = (1 << 1),
+ RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2),
+ RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3),
+ RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4),
+ RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5),
+
+ RDMA_DEV_CAP_FLAG_XRC = (1 << 6),
+ RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7),
+ RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8),
+ RDMA_DEV_CAP_FLAG_APM = (1 << 9),
+};
+
+/* RDMA services*/
+struct rdma_service_cap {
+ struct dev_rdma_svc_cap dev_rdma_cap;
+
+	u8 log_mtt;	/* 1. the number of MTT PAs must be an integer
+			 * power of 2
+			 * 2. represented as a logarithm; each MTT table can
+			 * contain 1, 2, 4, 8 or 16 PAs
+			 */
+	u8 log_rdmarc;	/* 1. the number of RDMArc PAs must be an integer
+			 * power of 2
+			 * 2. represented as a logarithm; each table can
+			 * contain 1, 2, 4, 8 or 16 PAs
+			 */
+
+ u32 reserved_qps; /* Number of reserved QP*/
+ u32 max_sq_sg; /* Maximum SGE number of SQ (8)*/
+	u32 max_sq_desc_sz;	/* WQE maximum size of SQ (1024B); inline
+				 * maximum size is 960B (944B aligned to
+				 * 960B), 960B => wqebb alignment => 1024B
+				 */
+	u32 wqebb_size;	/* Currently the chip supports 64B and 128B,
+			 * defined as 64 bytes
+			 */
+
+ u32 max_cqes; /* Size of the depth of the CQ (64K-1)*/
+ u32 reserved_cqs; /* Number of reserved CQ*/
+ u32 cqc_entry_sz; /* Size of the CQC (64B/128B)*/
+ u32 cqe_size; /* Size of CQE (32B)*/
+
+ u32 reserved_mrws; /* Number of reserved MR/MR Window*/
+ u32 mpt_entry_sz; /* MPT table size (64B)*/
+ u32 max_fmr_maps; /* max MAP of FMR,
+ * (1 << (32-ilog2(num_mpt)))-1;
+ */
+
+ /* todo: need to check whether related to max_mtt_seg */
+ u32 num_mtts; /* Number of MTT table (4M),
+ * is actually MTT seg number
+ */
+ /* todo: max value needs to be confirmed */
+ /* MTT table number of Each MTT seg(3)*/
+ u32 log_mtt_seg;
+ u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits)*/
+ u32 log_rdmarc_seg; /* table number of each RDMArc seg(3)*/
+
+	/* ACK timeout. Formula: Tr = 4.096us * 2^local_ca_ack_delay,
+	 * range [Tr, 4Tr]; see the worked example after this structure.
+	 */
+	u32 local_ca_ack_delay;
+ u32 num_ports; /* Physical port number*/
+
+ u32 db_page_size; /* Size of the DB (4KB)*/
+ u32 direct_wqe_size; /* Size of the DWQE (256B)*/
+
+ u32 num_pds; /* Maximum number of PD (128K)*/
+ u32 reserved_pds; /* Number of reserved PD*/
+ u32 max_xrcds; /* Maximum number of xrcd (64K)*/
+ u32 reserved_xrcds; /* Number of reserved xrcd*/
+
+ u32 max_gid_per_port; /* gid number (16) of each port*/
+ u32 gid_entry_sz; /* RoCE v2 GID table is 32B,
+ * compatible RoCE v1 expansion
+ */
+
+ u32 reserved_lkey; /* local_dma_lkey */
+ u32 num_comp_vectors; /* Number of complete vector (32)*/
+ u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size*/
+
+	u32 flags;		/* RDMA capability flags (RDMA_BMME_FLAG_*,
+				 * RDMA_DEV_CAP_FLAG_*)
+				 */
+	u32 max_frpl_len;	/* Maximum number of pages for FRMR
+				 * registration
+				 */
+	u32 max_pkeys;		/* Number of supported pkey groups */
+};
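+/* Worked example for local_ca_ack_delay (the value is an assumption): with
+ * local_ca_ack_delay = 15, Tr = 4.096us * 2^15 ~= 134ms, so the ACK timeout
+ * falls within [134ms, 537ms].
+ */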
+
+/* PF/VF FCoE service resource structure defined*/
+struct dev_fcoe_svc_cap {
+ /* PF resources*/
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_srqs;
+
+ /* Child Context(Task IO)
+ * For FCoE/IOE services, at most 8K
+ */
+ u32 max_cctxs;
+ u32 cctxs_id_start;
+
+ u8 vp_id_start;
+ u8 vp_id_end;
+};
+
+/* FCoE services*/
+struct fcoe_service_cap {
+ struct dev_fcoe_svc_cap dev_fcoe_cap;
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
+ /* SQ */
+ u32 qpc_basic_size;
+ u32 childc_basic_size;
+ u32 sqe_size;
-#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
-#define HINIC_MSG_HEADER_MODULE_SHIFT 11
-#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
-#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
-#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
-#define HINIC_MSG_HEADER_SEQID_SHIFT 24
-#define HINIC_MSG_HEADER_LAST_SHIFT 30
-#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
-#define HINIC_MSG_HEADER_CMD_SHIFT 32
-#define HINIC_MSG_HEADER_ZEROS_SHIFT 40
-#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48
-#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50
-#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+ /* SCQ */
+ u32 scqc_basic_size;
+ u32 scqe_size;
+
+ /* SRQ */
+ u32 srqc_size;
+ u32 srqe_size;
+};
-#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
-#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
-#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
-#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
-#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
-#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
-#define HINIC_MSG_HEADER_LAST_MASK 0x1
-#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
-#define HINIC_MSG_HEADER_CMD_MASK 0xFF
-#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF
-#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3
-#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF
-#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+/* PF/VF ToE service resource structure */
+struct dev_toe_svc_cap {
+ /* PF resources*/
+	u32 max_pctxs; /* Parent Context: at most 1M */
+ u32 max_cqs;
+ u32 max_srqs;
+ u32 srq_id_start;
-#define HINIC_MSG_HEADER_SET(val, member) \
- ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
- HINIC_MSG_HEADER_##member##_SHIFT)
+ u8 num_cos;
+};
-#define HINIC_MSG_HEADER_GET(val, member) \
- (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
- HINIC_MSG_HEADER_##member##_MASK)
+/* ToE services*/
+struct toe_service_cap {
+ struct dev_toe_svc_cap dev_toe_cap;
-enum hinic_mgmt_msg_type {
- HINIC_MGMT_MSG_SYNC = 1,
+ bool alloc_flag;
+ u32 pctx_sz;/* 1KB */
+ u32 scqc_sz;/* 64B */
};
-enum hinic_cfg_cmd {
- HINIC_CFG_NIC_CAP = 0,
+/* PF FC service resource structure defined*/
+struct dev_fc_svc_cap {
+ /* PF Parent QPC */
+ u32 max_parent_qpc_num; /* max number is 2048*/
+
+ /* PF Child QPC */
+ u32 max_child_qpc_num; /* max number is 2048*/
+ u32 child_qpc_id_start;
+
+ /* PF SCQ */
+ u32 scq_num; /* 16 */
+
+ /* PF supports SRQ*/
+ u32 srq_num; /* Number of SRQ is 2*/
+
+ u8 vp_id_start;
+ u8 vp_id_end;
};
-enum hinic_comm_cmd {
- HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
+/* FC services*/
+struct fc_service_cap {
+ struct dev_fc_svc_cap dev_fc_cap;
+
+ /* Parent QPC */
+ u32 parent_qpc_size; /* 256B */
+
+ /* Child QPC */
+ u32 child_qpc_size; /* 256B */
+
+ /* SQ */
+ u32 sqe_size; /* 128B(in linked list mode)*/
- HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
- HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
+ /* SCQ */
+ u32 scqc_size; /* Size of the Context 32B*/
+ u32 scqe_size; /* 64B */
- HINIC_COMM_CMD_HWCTXT_SET = 0x12,
- HINIC_COMM_CMD_HWCTXT_GET = 0x13,
+ /* SRQ */
+ u32 srqc_size; /* Size of SRQ Context (64B)*/
+ u32 srqe_size; /* 32B */
+};
- HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14,
+/* PF OVS service resource structure defined*/
+struct dev_ovs_svc_cap {
+ /* PF resources*/
+	u32 max_pctxs; /* Parent Context: at most 1M */
+ u32 max_cqs;
- HINIC_COMM_CMD_RES_STATE_SET = 0x24,
+ /* VF resources*/
+	u32 vf_max_pctxs; /* Parent Context: at most 1M */
+ u32 vf_max_cqs;
+};
- HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
+/* OVS services*/
+struct ovs_service_cap {
+ struct dev_ovs_svc_cap dev_ovs_cap;
- HINIC_COMM_CMD_MAX = 0x32,
+ bool alloc_flag;
+ u32 pctx_sz; /* 512B */
+ u32 scqc_sz; /* 64B */
};
-enum hinic_mgmt_cb_state {
- HINIC_MGMT_CB_ENABLED = BIT(0),
- HINIC_MGMT_CB_RUNNING = BIT(1),
+/* PF ACL service resource structure */
+struct dev_acl_svc_cap {
+ /* PF resources*/
+	u32 max_pctxs; /* Parent Context: at most 1M */
+ u32 max_cqs;
+
+ /* VF resources*/
+	u32 vf_max_pctxs; /* Parent Context: at most 1M */
+ u32 vf_max_cqs;
};
-struct hinic_recv_msg {
- u8 *msg;
- u8 *buf_out;
+/* ACL services*/
+struct acl_service_cap {
+ struct dev_acl_svc_cap dev_acl_cap;
+
+ bool alloc_flag;
+ u32 pctx_sz; /* 512B */
+ u32 scqc_sz; /* 64B */
+};
- struct completion recv_done;
+bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap);
+bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap);
+/* Supported on the PPF, not on the PF */
+bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap);
+bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap);
+bool hinic_support_fic(void *hwdev);
+bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap);
+bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap);
+bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_ft(void *hwdev);
+bool hinic_func_for_mgmt(void *hwdev);
+
+int hinic_set_toe_enable(void *hwdev, bool enable);
+bool hinic_get_toe_enable(void *hwdev);
+int hinic_set_fcoe_enable(void *hwdev, bool enable);
+bool hinic_get_fcoe_enable(void *hwdev);
+bool hinic_get_stateful_enable(void *hwdev);
+
+/* Service interface for obtaining service_cap public fields*/
+/* Obtain service_cap.host_oq_id_mask_val*/
+u8 hinic_host_oq_id_mask(void *hwdev);
+u8 hinic_host_id(void *hwdev);/* Obtain service_cap.host_id*/
+/* Obtain service_cap.host_total_function*/
+u16 hinic_host_total_func(void *hwdev);
+/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs*/
+u16 hinic_func_max_nic_qnum(void *hwdev);
+/* Obtain service_cap.dev_cap.max_sqs*/
+u16 hinic_func_max_qnum(void *hwdev);
+u8 hinic_ep_id(void *hwdev);/* Obtain service_cap.ep_id*/
+u8 hinic_er_id(void *hwdev);/* Obtain service_cap.er_id*/
+u8 hinic_physical_port_id(void *hwdev);/* Obtain service_cap.port_id*/
+u8 hinic_func_max_vf(void *hwdev);/* Obtain service_cap.max_vf*/
+u32 hinic_func_pf_num(void *hwdev);/* Obtain service_cap.pf_num*/
+u8 hinic_max_num_cos(void *hwdev);
+u8 hinic_cos_valid_bitmap(void *hwdev);
+u8 hinic_net_port_mode(void *hwdev);/* Obtain service_cap.net_port_mode*/
+
+/* The following information is obtained from the BAR space and recorded by
+ * the SDK layer; query interfaces are provided here for the services.
+ */
+/* func_attr.glb_func_idx, global function index */
+u16 hinic_global_func_id(void *hwdev);
+/* func_attr.intr_num, MSI-X table entry in function*/
+u16 hinic_intr_num(void *hwdev);
+enum intr_type {
+ INTR_TYPE_MSIX,
+ INTR_TYPE_MSI,
+ INTR_TYPE_INT,
+ INTR_TYPE_NONE,
+	/* PXE and OVS need single-threaded processing; synchronous
+	 * messages must use the poll-wait interface
+	 */
+};
- u16 cmd;
- enum hinic_mod_type mod;
- int async_mgmt_to_pf;
+enum intr_type hinic_intr_type(void *hwdev);
- u16 msg_len;
- u16 msg_id;
+u8 hinic_pf_id_of_vf(void *hwdev); /* func_attr.p2p_idx, belongs to which pf */
+u8 hinic_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */
+u8 hinic_vf_in_pf(void *hwdev); /* func_attr.vf_in_pf, the vf offset in pf */
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+ TYPE_UNKNOWN,
};
-struct hinic_mgmt_cb {
- void (*cb)(void *handle, u8 cmd,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size);
+/* func_attr.func_type, 0-PF 1-VF 2-PPF */
+enum func_type hinic_func_type(void *hwdev);
+
+u8 hinic_ceq_num(void *hwdev); /* func_attr.ceq_num, ceq num in one function */
+/* func_attr.dma_attr_entry_num, dma attribute entry num */
+u8 hinic_dma_attr_entry_num(void *hwdev);
+/* The PF func_attr.glb_pf_vf_offset,
+ * PF use only
+ */
+u16 hinic_glb_pf_vf_offset(void *hwdev);
+/* func_attr.mpf_idx, mpf global function index,
+ * This value is valid only when it is PF
+ */
+u8 hinic_mpf_idx(void *hwdev);
+u8 hinic_ppf_idx(void *hwdev);
- void *handle;
- unsigned long state;
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
};
-struct hinic_pf_to_mgmt {
- struct hinic_hwif *hwif;
+void hinic_set_msix_state(void *hwdev, u16 msix_idx,
+ enum hinic_msix_state flag);
+
+/* Define the version information structure*/
+struct dev_version_info {
+	u8 up_ver;	/* uP version, read directly from the uP,
+			 * not taken from the configuration file
+			 */
+ u8 ucode_ver; /* The microcode version,
+ * read through the CMDq from microcode
+ */
+ u8 cfg_file_ver;/* uP configuration file version*/
+ u8 sdk_ver; /* SDK driver version*/
+ u8 hw_ver; /* Hardware version*/
+};
- struct semaphore sync_msg_lock;
- u16 sync_msg_id;
- u8 *sync_msg_buf;
+/* Obtain service_cap.dev_version_info
+ */
+int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver);
- struct hinic_recv_msg recv_resp_msg_from_mgmt;
- struct hinic_recv_msg recv_msg_from_mgmt;
+int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector);
- struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+/* Defines the IRQ information structure*/
+struct irq_info {
+	u16 msix_entry_idx; /* index of the corresponding MSI-X entry */
+	u32 irq_id; /* the IRQ number assigned by the OS */
+};
- struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX];
+int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 req_num,
+ struct irq_info *irq_info_array, u16 *resp_num);
+void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id);
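+/* Illustrative sketch only: request two IRQs for the NIC service and give
+ * one back (the counts are arbitrary examples).
+ *
+ *	struct irq_info irqs[2];
+ *	u16 resp_num = 0;
+ *	int err;
+ *
+ *	err = hinic_alloc_irqs(hwdev, SERVICE_T_NIC, 2, irqs, &resp_num);
+ *	if (!err && resp_num)
+ *		hinic_free_irq(hwdev, SERVICE_T_NIC, irqs[0].irq_id);
+ */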
+int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int req_num,
+ int *ceq_id_array, int *resp_num);
+void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id);
+int hinic_sync_time(void *hwdev, u64 time);
+
+struct hinic_micro_log_info {
+ int (*init)(void *hwdev);
+ void (*deinit)(void *hwdev);
};
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
+int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info);
+void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info);
+
+void hinic_set_func_deinit_flag(void *hwdev);
+void hinic_flush_mgmt_workq(void *hwdev);
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod);
+enum func_nic_state {
+ HINIC_FUNC_NIC_DEL,
+ HINIC_FUNC_NIC_ADD,
+};
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync);
+struct hinic_func_nic_state {
+ u8 state;
+ u8 rsvd0;
+ u16 func_idx;
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif);
+ u8 rsvd1[16];
+};
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt);
+int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state);
+int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
new file mode 100644
index 000000000000..ffd4104b5a8f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
@@ -0,0 +1,5019 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_msix_attr.h"
+#include "hinic_nic_io.h"
+#include "hinic_eqs.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_wq.h"
+#include "hinic_cmdq.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_hwif.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+
+#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC_WAIT_IO_STATUS_TIMEOUT 100
+
+#define HINIC_FLR_TIMEOUT 1000
+
+#define HINIC_HT_GPA_PAGE_SIZE 4096UL
+
+#define HINIC_PPF_HT_GPA_SET_RETRY_TIMES 10
+
+#define HINIC_OK_FLAG_OK 0
+
+#define HINIC_OK_FLAG_FAILED 1
+
+#define HINIC_GLB_SO_RO_CFG_SHIFT 0x0
+#define HINIC_GLB_SO_RO_CFG_MASK 0x1
+#define HINIC_DISABLE_ORDER 0
+#define HINIC_GLB_DMA_SO_RO_GET(val, member) \
+ (((val) >> HINIC_GLB_##member##_SHIFT) & HINIC_GLB_##member##_MASK)
+
+#define HINIC_GLB_DMA_SO_R0_CLEAR(val, member) \
+ ((val) & (~(HINIC_GLB_##member##_MASK << HINIC_GLB_##member##_SHIFT)))
+
+#define HINIC_GLB_DMA_SO_R0_SET(val, member) \
+ (((val) & HINIC_GLB_##member##_MASK) << HINIC_GLB_##member##_SHIFT)
+
+#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0
+#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1
+#define HINIC_ACTIVE_STATUS_MASK 0x80000000
+#define HINIC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF
+
+#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)
+
+#define HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \
+ ((val) & (~(HINIC_##member##_MASK << HINIC_##member##_SHIFT)))
+
+#define HINIC_SET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) & HINIC_##member##_MASK) << HINIC_##member##_SHIFT)
+
+#define HINIC_BOARD_IS_PHY(hwdev) \
+ ((hwdev)->board_info.board_type == 4 && \
+ (hwdev)->board_info.board_id == 24)
+
+struct comm_info_ht_gpa_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 rsvd1;
+ u32 rsvd2;
+
+ u64 page_pa0;
+ u64 page_pa1;
+};
+
+struct comm_info_eqm_fix {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 chunk_num;
+ u32 search_gpa_num;
+};
+
+struct comm_info_eqm_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 ppf_id;
+ u32 page_size;
+ u32 valid;
+};
+
+struct comm_info_eqm_search_gpa {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 start_idx;
+ u32 num;
+ u32 resv0;
+ u32 resv1;
+ u64 gpa_hi52[0]; /*lint !e1501 */
+};
+
+struct hinic_cons_idx_attr {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 sq_id;
+ u64 ci_addr;
+};
+
+struct hinic_clear_doorbell {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_clear_resource {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_msix_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesct_timer_cnt;
+ u8 lli_tmier_cnt;
+ u8 lli_credit_cnt;
+ u8 resend_timer_cnt;
+ u8 rsvd1[3];
+};
+
+enum func_tmr_bitmap_status {
+ FUNC_TMR_BITMAP_DISABLE,
+ FUNC_TMR_BITMAP_ENABLE,
+};
+
+struct hinic_func_tmr_bitmap_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 op_id; /* 0:start; 1:stop */
+ u8 ppf_idx;
+ u32 rsvd1;
+};
+
+struct hinic_ppf_tmr_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 ppf_idx;
+ u8 op_id; /* 0: stop timer; 1:start timer */
+ u8 rsvd1[2];
+ u32 rsvd2;
+};
+
+struct hinic_cmd_set_res_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+ u32 rsvd2;
+};
+
+int hinic_hw_rx_buf_size[] = {
+ HINIC_RX_BUF_SIZE_32B,
+ HINIC_RX_BUF_SIZE_64B,
+ HINIC_RX_BUF_SIZE_96B,
+ HINIC_RX_BUF_SIZE_128B,
+ HINIC_RX_BUF_SIZE_192B,
+ HINIC_RX_BUF_SIZE_256B,
+ HINIC_RX_BUF_SIZE_384B,
+ HINIC_RX_BUF_SIZE_512B,
+ HINIC_RX_BUF_SIZE_768B,
+ HINIC_RX_BUF_SIZE_1K,
+ HINIC_RX_BUF_SIZE_1_5K,
+ HINIC_RX_BUF_SIZE_2K,
+ HINIC_RX_BUF_SIZE_3K,
+ HINIC_RX_BUF_SIZE_4K,
+ HINIC_RX_BUF_SIZE_8K,
+ HINIC_RX_BUF_SIZE_16K,
+};
+
+/* vf-pf dma attr table */
+struct hinic_vf_dma_attr_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 func_dma_entry_num;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u8 resv1[3];
+};
+
+struct hinic_led_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port;
+ u8 type;
+ u8 mode;
+ u8 reset;
+};
+
+struct hinic_comm_board_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_board_info info;
+
+ u32 rsvd1[4];
+};
+
+#define PHY_DOING_INIT_TIMEOUT (15 * 1000)
+
+struct hinic_phy_init_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 init_status;
+ u8 rsvd1[3];
+};
+
+enum phy_init_status_type {
+ PHY_INIT_DOING = 0,
+ PHY_INIT_SUCCESS = 1,
+ PHY_INIT_FAIL = 2,
+ PHY_NONSUPPORT = 3,
+};
+
+struct hinic_update_active {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 update_flag;
+ u32 update_status;
+};
+
+enum hinic_bios_cfg_op_code {
+ HINIC_BIOS_CFG_GET = 0,
+ HINIC_BIOS_CFG_PF_BW_LIMIT = 0x1 << 6,
+};
+
+struct hinic_bios_cfg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 op_code;
+ u32 signature;
+
+ u8 rsvd1[12];
+ u32 pf_bw_limit;
+ u8 rsvd2[5];
+
+ u8 func_valid;
+ u8 func_idx;
+ u8 rsvd3;
+};
+
+struct hinic_mgmt_watchdog_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
+struct hinic_fmw_act_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 rsvd1[5];
+};
+
+struct hinic_ppf_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u8 ppf_state;
+ u8 rsvd1[3];
+};
+
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
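+/* Encodes a WQ page size as log2(pg_size / 4KB): 4KB -> 0, 8KB -> 1,
+ * 64KB -> 4.
+ */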
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */
+ u8 page_size;
+
+ u32 rsvd1;
+};
+
+#define MAX_PCIE_DFX_BUF_SIZE (1024)
+
+struct hinic_pcie_dfx_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ int len;
+ u32 rsvd;
+};
+
+struct hinic_pcie_dfx_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 host_id;
+ u8 last;
+ u8 rsvd[2];
+ u32 offset;
+
+ u8 data[MAX_PCIE_DFX_BUF_SIZE];
+};
+
+struct hinic_hw_pf_infos_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_hw_pf_infos infos;
+};
+
+enum hinic_sdi_mode_ops {
+ HINIC_SDI_INFO_SET = 1U << 0, /* 1-save, 0-read */
+ HINIC_SDI_INFO_MODE = 1U << 1,
+};
+
+struct hinic_sdi_mode_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ /* Op-Code:
+ * Bit0: 0 - read configuration, 1 - write configuration
+ * Bit1: 0 - ignored, 1 - get/set SDI Mode
+ */
+ u32 opcode;
+ u32 signature;
+ u16 cur_sdi_mode;
+ u16 cfg_sdi_mode;
+
+ u32 rsvd1[29];
+};
+
+struct hinic_reg_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 reg_addr;
+ u32 val_length;
+
+ u32 data[2];
+};
+
+#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0
+#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8
+#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13
+
+#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF
+#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1
+
+#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \
+ (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
+ HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \
+ ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
+ << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HINIC_PCIE_ST_DISABLE 0
+#define HINIC_PCIE_AT_DISABLE 0
+#define HINIC_PCIE_PH_DISABLE 0
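+/* Illustrative sketch only: composing a DMA attribute entry with every
+ * field at its disabled default.
+ *
+ *	u32 attr = HINIC_DMA_ATTR_ENTRY_SET(HINIC_PCIE_ST_DISABLE, ST) |
+ *		   HINIC_DMA_ATTR_ENTRY_SET(HINIC_PCIE_AT_DISABLE, AT) |
+ *		   HINIC_DMA_ATTR_ENTRY_SET(HINIC_PCIE_PH_DISABLE, PH) |
+ *		   HINIC_DMA_ATTR_ENTRY_SET(0, NO_SNOOPING) |
+ *		   HINIC_DMA_ATTR_ENTRY_SET(0, TPH_EN);
+ */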
+
+#define PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC_CHIP_PRESENT 1
+#define HINIC_CHIP_ABSENT 0
+
+struct hinic_cmd_fault_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_fault_event event;
+};
+
+#define HEARTBEAT_DRV_MAGIC_ACK 0x5A5A5A5A
+
+struct hinic_heartbeat_support {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 ppf_id;
+ u8 pf_issupport;
+ u8 mgmt_issupport;
+ u8 rsvd1[5];
+};
+
+struct hinic_heartbeat_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 mgmt_init_state;
+ u8 rsvd1[3];
+ u32 heart; /* increased every event */
+ u32 drv_heart;
+};
+
+static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out);
+static void hinic_set_mgmt_channel_status(void *handle, bool state);
+static inline void __set_heartbeat_ehd_detect_delay(struct hinic_hwdev *hwdev,
+ u32 delay_ms);
+
+#define HINIC_MGMT_STATUS_ERR_OK 0 /* Ok */
+#define HINIC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */
+#define HINIC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */
+#define HINIC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */
+#define HINIC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */
+#define HINIC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */
+#define HINIC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */
+#define HINIC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */
+#define HINIC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */
+#define HINIC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */
+#define HINIC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */
+#define HINIC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */
+#define HINIC_MGMT_STATUS_ERR_FULL 12 /* Table full */
+#define HINIC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */
+#define HINIC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */
+#define HINIC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */
+#define HINIC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */
+#define HINIC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */
+#define HINIC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */
+#define HINIC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */
+#define HINIC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */
+#define HINIC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */
+#define HINIC_MGMT_STATUS_ERR_UNSUPPORT 0xFF/* Feature not supported*/
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
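+/* These depth bounds appear to be log2 values: 6..12 would correspond to
+ * 64..4096 queue entries (an inference; the encoding is not stated in this
+ * patch).
+ */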
+
+static bool check_root_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_root_ctxt *root_ctxt;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ root_ctxt = (struct hinic_root_ctxt *)buf_in;
+
+ if (root_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (root_ctxt->set_cmdq_depth) {
+ if (root_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
+ return true;
+
+ return false;
+ }
+
+ if (root_ctxt->rq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->rq_depth <= HINIC_QUEUE_MAX_DEPTH &&
+ root_ctxt->sq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->sq_depth <= HINIC_QUEUE_MAX_DEPTH &&
+ root_ctxt->rx_buf_sz <= HINIC_MAX_RX_BUFFER_SIZE)
+ return true;
+
+ if (!root_ctxt->rq_depth && !root_ctxt->sq_depth &&
+ !root_ctxt->rx_buf_sz)
+ return true;
+
+ return false;
+}
+
+static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
+}
+
+static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_wq_page_size *page_size_info = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (page_size_info->ppf_idx != hinic_ppf_idx(hwdev))
+ return false;
+
+ if (((1U << page_size_info->page_size) * 0x1000) !=
+ HINIC_DEFAULT_WQ_PAGE_SIZE)
+ return false;
+
+ return true;
+}
+
+static bool __mbox_check_tmr_bitmap(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_func_tmr_bitmap_op *bitmap_op = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (bitmap_op->op_id == FUNC_TMR_BITMAP_ENABLE) {
+ if (!hinic_get_ppf_status(hwdev)) {
+			sdk_err(hwdev->dev_hdl, "PPF timer is not init, can't enable %d timer bitmap\n",
+				func_idx);
+ return false;
+ }
+ }
+
+ if (bitmap_op->ppf_idx != hinic_ppf_idx(hwdev))
+ return false;
+
+ return true;
+}
+
+struct vf_cmd_check_handle hw_cmd_support_vf[] = {
+ {HINIC_MGMT_CMD_START_FLR, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
+ {HINIC_MGMT_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
+ {HINIC_MGMT_CMD_VAT_SET, check_root_ctxt},
+ {HINIC_MGMT_CMD_VAT_GET, check_root_ctxt},
+ {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
+
+ {HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
+
+ {HINIC_MGMT_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_PAGESIZE_SET, check_set_wq_page_size},
+ {HINIC_MGMT_CMD_PAGESIZE_GET, hinic_mbox_check_func_id_8B},
+ {HINIC_MGMT_CMD_GET_PPF_STATE, NULL},
+ {HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, __mbox_check_tmr_bitmap},
+ {HINIC_MGMT_CMD_GET_BOARD_INFO, NULL},
+ {HINIC_MGMT_CMD_GET_SDI_MODE, NULL},
+};
+
+struct hinic_mgmt_status_log {
+ u8 status;
+ const char *log;
+};
+
+struct hinic_mgmt_status_log mgmt_status_log[] = {
+ {HINIC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"},
+ {HINIC_MGMT_STATUS_ERR_FAILED, "Operation failed"},
+ {HINIC_MGMT_STATUS_ERR_PORT, "Invalid port"},
+ {HINIC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"},
+ {HINIC_MGMT_STATUS_ERR_NOMATCH, "Version not match"},
+ {HINIC_MGMT_STATUS_ERR_EXIST, "Entry exists"},
+ {HINIC_MGMT_STATUS_ERR_NOMEM, "Out of memory"},
+ {HINIC_MGMT_STATUS_ERR_INIT, "Feature not initialized"},
+ {HINIC_MGMT_STATUS_ERR_FAULT, "Invalid address"},
+ {HINIC_MGMT_STATUS_ERR_PERM, "Operation not permitted"},
+ {HINIC_MGMT_STATUS_ERR_EMPTY, "Table empty"},
+ {HINIC_MGMT_STATUS_ERR_FULL, "Table full"},
+ {HINIC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"},
+	{HINIC_MGMT_STATUS_ERR_BUSY, "Device or resource busy"},
+	{HINIC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation"},
+ {HINIC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"},
+ {HINIC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"},
+ {HINIC_MGMT_STATUS_ERR_CRC, "CRC check failed"},
+ {HINIC_MGMT_STATUS_ERR_NXIO, "No such device or address"},
+ {HINIC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback fail"},
+ {HINIC_MGMT_STATUS_ERR_LEN, "Length too short or too long"},
+ {HINIC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"},
+};
+
+static void __print_status_info(struct hinic_hwdev *dev,
+ enum hinic_mod_type mod, u8 cmd, int index)
+{
+ if (mod == HINIC_MOD_COMM) {
+		sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s\n",
+			mod, cmd, mgmt_status_log[index].log);
+ } else if (mod == HINIC_MOD_L2NIC ||
+ mod == HINIC_MOD_HILINK) {
+		if (HINIC_IS_VF(dev) &&
+		    (cmd == HINIC_PORT_CMD_SET_MAC ||
+		     cmd == HINIC_PORT_CMD_DEL_MAC ||
+		     cmd == HINIC_PORT_CMD_UPDATE_MAC) &&
+		    mgmt_status_log[index].status == HINIC_PF_SET_VF_ALREADY)
+ return;
+
+		nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s\n",
+			mod, cmd, mgmt_status_log[index].log);
+ }
+}
+
+static bool hinic_status_need_special_handle(enum hinic_mod_type mod,
+ u8 cmd, u8 status)
+{
+ if (mod == HINIC_MOD_L2NIC) {
+ /* optical module isn't plugged in */
+ if ((cmd == HINIC_PORT_CMD_GET_STD_SFP_INFO ||
+ cmd == HINIC_PORT_CMD_GET_SFP_INFO) &&
+ status == HINIC_MGMT_STATUS_ERR_NXIO)
+ return true;
+
+ if ((cmd == HINIC_PORT_CMD_SET_MAC ||
+ cmd == HINIC_PORT_CMD_UPDATE_MAC) &&
+ status == HINIC_MGMT_STATUS_ERR_EXIST)
+ return true;
+ }
+
+ return false;
+}
+
+void hinic_print_status_info(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_out)
+{
+ struct hinic_hwdev *dev = hwdev;
+ int i, size;
+ u8 status;
+
+ if (!buf_out)
+ return;
+
+ if (mod != HINIC_MOD_COMM && mod != HINIC_MOD_L2NIC &&
+ mod != HINIC_MOD_HILINK)
+ return;
+
+ status = *(u8 *)buf_out;
+
+ if (!status)
+ return;
+
+ if (hinic_status_need_special_handle(mod, cmd, status))
+ return;
+
+ size = sizeof(mgmt_status_log) / sizeof(mgmt_status_log[0]);
+ for (i = 0; i < size; i++) {
+ if (status == mgmt_status_log[i].status) {
+ __print_status_info(dev, mod, cmd, i);
+ return;
+ }
+ }
+
+ if (mod == HINIC_MOD_COMM) {
+		sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+			mod, cmd, status);
+ } else if (mod == HINIC_MOD_L2NIC || mod == HINIC_MOD_HILINK) {
+		nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+			mod, cmd, status);
+ }
+}
+
+void hinic_set_chip_present(void *hwdev)
+{
+ ((struct hinic_hwdev *)hwdev)->chip_present_flag = HINIC_CHIP_PRESENT;
+}
+
+void hinic_set_chip_absent(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ sdk_err(dev->dev_hdl, "Card not present\n");
+ dev->chip_present_flag = HINIC_CHIP_ABSENT;
+}
+
+int hinic_get_chip_present_flag(void *hwdev)
+{
+	return ((struct hinic_hwdev *)hwdev)->chip_present_flag;
+}
+
+static void hinic_set_fast_recycle_status(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ sdk_err(dev->dev_hdl, "Enter fast recycle status\n");
+ dev->chip_present_flag = HINIC_CHIP_ABSENT;
+}
+
+void hinic_force_complete_all(void *hwdev)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_recv_msg *recv_resp_msg;
+ struct hinic_cmdq_cmd_info *cmdq_info;
+ struct hinic_cmdq *cmdq;
+ int pi = 0;
+ int ci = 0;
+ int delta = 0;
+ int i = 0;
+ u16 max_index = 0;
+
+ set_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
+
+ if (hinic_func_type(dev) != TYPE_VF &&
+ hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_MGMT_INITED)) {
+ recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+ if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) {
+ complete(&recv_resp_msg->recv_done);
+ dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT;
+ }
+ }
+
+ if (!hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED))
+ goto out;
+
+ cmdq = &dev->cmdqs->cmdq[HINIC_CMDQ_SYNC];
+ pi = cmdq->wq->prod_idx;
+ pi = MASKED_WQE_IDX(cmdq->wq, pi);
+ ci = cmdq->wq->cons_idx;
+ ci = MASKED_WQE_IDX(cmdq->wq, ci);
+ max_index = (cmdq->wq->q_depth) - 1;
+ delta = (pi >= ci) ? (pi - ci) : ((max_index - ci) + pi);
+
+ for (; i < delta; i++) {
+ cmdq_info = &cmdq->cmd_infos[ci];
+ spin_lock_bh(&cmdq->cmdq_lock);
+ if (cmdq_info->done) {
+ complete(cmdq_info->done);
+ cmdq_info->done = NULL;
+ atomic_add(1, &cmdq->wq->delta);
+ cmdq->wq->cons_idx += 1;
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ ci++;
+ ci = MASKED_WQE_IDX(cmdq->wq, ci);
+ }
+
+out:
+ clear_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
+}
+
+void hinic_detect_hw_present(void *hwdev)
+{
+ u32 addr, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr);
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ hinic_set_chip_absent(hwdev);
+ hinic_force_complete_all(hwdev);
+ }
+}
+
+void hinic_record_pcie_error(void *hwdev)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+
+ if (!hwdev)
+ return;
+
+ atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats);
+}
+
+static int __func_send_mbox(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ err = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in,
+ in_size, buf_out,
+ out_size, timeout);
+ else if (NEED_MBOX_FORWARD(hwdev))
+ err = hinic_mbox_to_host_sync(hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size,
+ timeout);
+ else
+ err = -EFAULT;
+
+ return err;
+}
+
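+/* Pre-check before a PF-to-mgmt message: reject new commands while the
+ * firmware is being activated, and mark the channel busy when an
+ * ACTIVATE_FW command is about to be sent.
+ */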
+static int __pf_to_mgmt_pre_handle(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd)
+{
+ if (hinic_get_mgmt_channel_status(hwdev)) {
+ if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC)
+ return HINIC_DEV_BUSY_ACTIVE_FW;
+ else
+ return -EBUSY;
+ }
+
+ /* Mark the channel invalid; other commands are not allowed to be sent */
+ if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_ACTIVATE_FW) {
+ hinic_set_mgmt_channel_status(hwdev, true);
+ /* stop enhanced heartbeat detection temporarily; it will be
+ * restarted by the firmware active event once mgmt is reset
+ */
+ __set_heartbeat_ehd_detect_delay(hwdev,
+ HINIC_DEV_ACTIVE_FW_TIMEOUT);
+ }
+
+ return 0;
+}
+
+static void __pf_to_mgmt_after_handle(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd,
+ int sw_status, void *mgmt_status)
+{
+ /* if activating the firmware failed, set the channel valid again */
+ if (mod == HINIC_MOD_COMM &&
+ cmd == HINIC_MGMT_CMD_ACTIVATE_FW) {
+ if (sw_status)
+ hinic_set_mgmt_channel_status(hwdev, false);
+ else
+ hinic_enable_mgmt_channel(hwdev, mgmt_status);
+ }
+}
+
+int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_hwdev *dev = hwdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!((struct hinic_hwdev *)hwdev)->chip_present_flag)
+ return -EPERM;
+
+ if (NEED_MBOX_FORWARD(dev)) {
+ if (!hinic_is_hwdev_mod_inited(hwdev,
+ HINIC_HWDEV_MBOX_INITED)) {
+ return -EPERM;
+ }
+
+ err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ } else {
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED))
+ return -EPERM;
+
+ if (in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
+ return -EINVAL;
+
+ err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd);
+ if (err)
+ return err;
+
+ err = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out);
+ }
+
+ return err;
+}
+
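+/* Send a synchronous message to the mgmt cpu: by mailbox for VFs (or
+ * when forwarding through the master host is needed), by the management
+ * channel otherwise. While the firmware is being activated, poll once
+ * per second until it completes or the activation timeout expires.
+ */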
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_hwdev *dev = hwdev;
+ unsigned long end;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(dev->chip_present_flag))
+ return -EPERM;
+
+ end = jiffies + msecs_to_jiffies(HINIC_DEV_ACTIVE_FW_TIMEOUT);
+ if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) {
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED))
+ return -EPERM;
+ do {
+ if (!hinic_get_chip_present_flag(hwdev))
+ break;
+
+ err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) {
+ hinic_print_status_info(hwdev, mod, cmd,
+ buf_out);
+ return err;
+ }
+
+ msleep(1000);
+ } while (time_before(jiffies, end));
+
+ err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ } else {
+ do {
+ if (!hinic_get_mgmt_channel_status(hwdev) ||
+ !hinic_get_chip_present_flag(hwdev))
+ break;
+
+ msleep(1000);
+ } while (time_before(jiffies, end));
+ err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size,
+ timeout);
+ }
+
+ hinic_print_status_info(hwdev, mod, cmd, buf_out);
+
+ return err;
+}
+EXPORT_SYMBOL(hinic_msg_to_mgmt_sync);
+
+/* PF sends a msg to the uP by api cmd and returns immediately; VFs are not supported */
+int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+ hinic_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ err = -EFAULT;
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Mailbox don't support async cmd\n");
+ } else {
+ err = hinic_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(hinic_msg_to_mgmt_async);
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_hwdev *dev = hwdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(dev->chip_present_flag))
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) {
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED))
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ err = hinic_mbox_to_pf_no_ack(hwdev, mod, cmd, buf_in,
+ in_size);
+ else
+ err = hinic_mbox_to_host_no_ack(hwdev, mod, cmd, buf_in,
+ in_size);
+ } else {
+ err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size);
+ }
+
+ return err;
+}
+
+int hinic_mbox_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = __hinic_mbox_to_vf(hwdev, mod, vf_id, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+ /* VF already in error condition */
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "VF%d not initialized, disconnect it\n",
+ vf_id);
+ hinic_unregister_vf_msg_handler(hwdev, vf_id);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(hinic_mbox_to_vf);
+
+/**
+ * hinic_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ **/
+void hinic_cpu_to_be32(void *data, int len)
+{
+ int i, chunk_sz = sizeof(u32);
+ u32 *mem = data;
+
+ if (!data)
+ return;
+
+ len = len / chunk_sz;
+
+ for (i = 0; i < len; i++) {
+ *mem = cpu_to_be32(*mem);
+ mem++;
+ }
+}
+EXPORT_SYMBOL(hinic_cpu_to_be32);
+
+/**
+ * hinic_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ **/
+void hinic_be32_to_cpu(void *data, int len)
+{
+ int i, chunk_sz = sizeof(u32);
+ u32 *mem = data;
+
+ if (!data)
+ return;
+
+ len = len / chunk_sz;
+
+ for (i = 0; i < len; i++) {
+ *mem = be32_to_cpu(*mem);
+ mem++;
+ }
+}
+EXPORT_SYMBOL(hinic_be32_to_cpu);
+
+/**
+ * hinic_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ **/
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
+{
+ sge->hi_addr = upper_32_bits(addr);
+ sge->lo_addr = lower_32_bits(addr);
+ sge->len = len;
+}
+
+/**
+ * hinic_sge_to_dma - get dma address from scatter gather entry
+ * @sge: scatter gather entry
+ *
+ * Return dma address of sg entry
+ **/
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
+{
+ return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
+}
+
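+/**
+ * hinic_set_ci_table - set the consumer index attribute table for a SQ
+ * @hwdev: the device to configure
+ * @q_id: send queue id
+ * @attr: CI attributes: dma address, interrupt and coalescing settings
+ **/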
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
+{
+ struct hinic_cons_idx_attr cons_idx_attr = {0};
+ u16 out_size = sizeof(cons_idx_attr);
+ int err;
+
+ if (!hwdev || !attr)
+ return -EINVAL;
+
+ cons_idx_attr.func_idx = hinic_global_func_id(hwdev);
+
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.sq_id = q_id;
+
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+ &cons_idx_attr, sizeof(cons_idx_attr),
+ &cons_idx_attr, &out_size, 0);
+ if (err || !out_size || cons_idx_attr.status) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set ci attribute table, err: %d, status: 0x%x, out_size:
0x%x\n",
+ err, cons_idx_attr.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_ci_table);
+
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
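+/* Translate an rx buffer size in bytes into the hardware size index
+ * used in the root context; unsupported sizes fall back to the 2K entry.
+ */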
+static u16 get_hw_rx_buf_size(int rx_buf_sz)
+{
+ u16 num_hw_types = ARRAY_SIZE(hinic_hw_rx_buf_size);
+ u16 i;
+
+ for (i = 0; i < num_hw_types; i++) {
+ if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
+ return i;
+ }
+
+ pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz);
+
+ return DEFAULT_RX_BUF_SIZE; /* default 2K */
+}
+
+int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
+{
+ struct hinic_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = (u16)ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
+ root_ctxt.sq_depth = (u16)ilog2(sq_depth);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_root_ctxt);
+
+int hinic_clean_root_ctxt(void *hwdev)
+{
+ struct hinic_root_ctxt root_ctxt = {0};
+ u16 out_size = sizeof(root_ctxt);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ &root_ctxt, &out_size, 0);
+ if (err || !out_size || root_ctxt.status) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, root_ctxt.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_clean_root_ctxt);
+
+static int wait_for_flr_finish(struct hinic_hwif *hwif)
+{
+ u32 cnt = 0;
+ enum hinic_pf_status status;
+
+ while (cnt < HINIC_FLR_TIMEOUT) {
+ status = hinic_get_pf_status(hwif);
+ if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) {
+ hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+ return 0;
+ }
+
+ usleep_range(9900, 10000);
+ cnt++;
+ }
+
+ return -EFAULT;
+}
+
+#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
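+/* Disable the cmdqs and poll until every queue is idle. If a queue is
+ * still busy after the timeout, re-enable the cmdqs and return -EBUSY.
+ */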
+static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
+{
+ enum hinic_cmdq_type cmdq_type;
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ u32 cnt = 0;
+ int err = 0;
+
+ if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+ while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT) {
+ err = 0;
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ if (!err)
+ return 0;
+
+ usleep_range(500, 1000);
+ cnt++;
+ }
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type);
+ }
+
+ cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+ return err;
+}
+
+static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clear_resource clr_res = {0};
+ int err;
+
+ err = wait_cmdq_stop(hwdev);
+ if (err)
+ sdk_warn(hwdev->dev_hdl, "Cmdq is still working, please check CMDQ timeout
value is reasonable\n");
+
+ clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
+ clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_START_FLR, &clr_res,
+ sizeof(clr_res));
+ if (err)
+ sdk_warn(hwdev->dev_hdl, "Failed to notice flush message\n");
+
+ /* The PF first disables the VF doorbell flush csr. After the PF
+ * finishes flushing the VF resources, it enables the csr again.
+ */
+ err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
+ if (err)
+ sdk_warn(hwdev->dev_hdl, "Wait doorbell flush disable timeout\n");
+ err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
+ if (err)
+ sdk_warn(hwdev->dev_hdl, "Wait doorbell flush enable timeout\n");
+
+ err = hinic_reinit_cmdq_ctxts(hwdev);
+ if (err)
+ sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+
+ return 0;
+}
+
+static void hinic_pf_set_vf_db_flush(struct hinic_hwdev *hwdev, u16 vf_id,
+ enum hinic_doorbell_ctrl val)
+{
+ u32 addr, vf_attr4;
+
+ addr = HINIC_PF_CSR_VF_FLUSH_OFF(vf_id);
+ vf_attr4 = hinic_hwif_read_reg(hwdev->hwif, addr);
+ vf_attr4 = HINIC_AF4_CLEAR(vf_attr4, DOORBELL_CTRL);
+ vf_attr4 |= HINIC_AF4_SET(val, DOORBELL_CTRL);
+ hinic_hwif_write_reg(hwdev->hwif, addr, vf_attr4);
+}
+
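+/* Flush a VF's rx/tx resources on its behalf: disable the VF doorbell
+ * flush csr, flush the doorbells and trigger FLR through the mgmt cpu,
+ * then re-enable the csr.
+ */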
+static int hinic_vf_rx_tx_flush_in_pf(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct hinic_clear_doorbell clear_db = {0};
+ struct hinic_clear_resource clr_res = {0};
+ u16 glb_vf_func_id;
+ u16 out_size;
+ int err;
+ int ret = 0;
+
+ /* disable vf doorbell flush csr */
+ hinic_pf_set_vf_db_flush(hwdev, vf_id, DISABLE_DOORBELL);
+
+ /* doorbell flush */
+ out_size = sizeof(clear_db);
+ glb_vf_func_id = HINIC_HWIF_GLOBAL_VF_OFFSET(hwdev->hwif) + vf_id;
+ clear_db.func_idx = glb_vf_func_id;
+ clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+ sizeof(clear_db), &clear_db, &out_size, 0);
+ if (err || !out_size || clear_db.status) {
+ sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, clear_db.status, out_size);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ /* wait for the microcode to stop I/O */
+ msleep(100);
+
+ /* notify the mgmt cpu to begin the VF flush */
+ out_size = sizeof(clr_res);
+ clr_res.func_idx = glb_vf_func_id;
+ clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_START_FLR, &clr_res,
+ sizeof(clr_res), &clr_res, &out_size, 0);
+ if (err || !out_size || clr_res.status) {
+ sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, clr_res.status, out_size);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+ /* enable vf doorbell flush csr */
+ hinic_pf_set_vf_db_flush(hwdev, vf_id, ENABLE_DOORBELL);
+
+ return ret;
+}
+
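+/* Flush the PF's own rx/tx resources: stop the cmdqs, flush the
+ * doorbells, trigger FLR in the firmware and wait for it to finish,
+ * then restore the doorbells and the cmdq contexts.
+ */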
+static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_clear_doorbell clear_db = {0};
+ struct hinic_clear_resource clr_res = {0};
+ u16 out_size;
+ int err;
+ int ret = 0;
+
+ /* wait for the microcode to stop I/O */
+ msleep(100);
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout
value is reasonable\n");
+ ret = err;
+ }
+
+ hinic_disable_doorbell(hwif);
+
+ out_size = sizeof(clear_db);
+ clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+ sizeof(clear_db), &clear_db, &out_size, 0);
+ if (err || !out_size || clear_db.status) {
+ sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, clear_db.status, out_size);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
+
+ clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+ err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_START_FLR, &clr_res,
+ sizeof(clr_res));
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "Failed to notice flush message\n");
+ ret = err;
+ }
+
+ err = wait_for_flr_finish(hwif);
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
+ ret = err;
+ }
+
+ hinic_enable_doorbell(hwif);
+
+ err = hinic_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
+int hinic_func_rx_tx_flush(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!dev->chip_present_flag)
+ return 0;
+
+ if (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+ return hinic_vf_rx_tx_flush(dev);
+ else
+ return hinic_pf_rx_tx_flush(dev);
+}
+EXPORT_SYMBOL(hinic_func_rx_tx_flush);
+
+int hinic_get_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info *interrupt_info)
+{
+ struct hinic_hwdev *nic_hwdev = hwdev;
+ struct hinic_msix_config msix_cfg = {0};
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ if (!hwdev || !interrupt_info)
+ return -EINVAL;
+
+ msix_cfg.func_id = hinic_global_func_id(hwdev);
+ msix_cfg.msix_index = interrupt_info->msix_index;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.status) {
+ sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status:
0x%x, out size: 0x%x\n",
+ err, msix_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+ interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+ interrupt_info->pending_limt = msix_cfg.pending_cnt;
+ interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+ interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_interrupt_cfg);
+
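+/**
+ * hinic_set_interrupt_cfg - set the interrupt configuration of a msix entry
+ * @hwdev: the device to configure
+ * @interrupt_info: lli and coalescing settings; a field group is applied
+ * only when its lli_set/interrupt_coalesc_set flag is set, otherwise the
+ * current hardware values are kept
+ **/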
+int hinic_set_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info interrupt_info)
+{
+ struct hinic_hwdev *nic_hwdev = hwdev;
+ struct hinic_msix_config msix_cfg = {0};
+ struct nic_interrupt_info temp_info;
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ msix_cfg.func_id = hinic_global_func_id(hwdev);
+ msix_cfg.msix_index = (u16)interrupt_info.msix_index;
+
+ temp_info.msix_index = interrupt_info.msix_index;
+
+ err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return -EINVAL;
+
+ msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
+ msix_cfg.pending_cnt = temp_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;
+
+ if (interrupt_info.lli_set) {
+ msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
+ }
+
+ if (interrupt_info.interrupt_coalesc_set) {
+ msix_cfg.pending_cnt = interrupt_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
+ }
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.status) {
+ sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status:
0x%x, out size: 0x%x\n",
+ err, msix_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_interrupt_cfg);
+
+void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic_hwif *hwif;
+ u32 msix_ctrl = 0, addr;
+
+ if (!hwdev)
+ return;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CNT_ADDR(msix_idx);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+EXPORT_SYMBOL(hinic_misx_intr_clear_resend_bit);
+
+static int init_aeqs_msix_attr(struct hinic_hwdev *hwdev)
+{
+ struct hinic_aeqs *aeqs = hwdev->aeqs;
+ struct nic_interrupt_info info = {0};
+ struct hinic_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->eq_irq.msix_entry_idx;
+ err = hinic_set_interrupt_cfg(hwdev, info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hinic_hwdev *hwdev)
+{
+ struct hinic_ceqs *ceqs = hwdev->ceqs;
+ struct nic_interrupt_info info = {0};
+ struct hinic_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->eq_irq.msix_entry_idx;
+ err = hinic_set_interrupt_cfg(hwdev, info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %d failed\n",
+ q_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * set_pf_dma_attr_entry - set the dma attributes for entry
+ * @hwdev: the device to configure
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at: PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ **/
+static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hinic_pcie_nosnoop no_snooping,
+ enum hinic_pcie_tph tph_en)
+{
+ u32 addr, val, dma_attr_entry;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);
+
+ val = hinic_hwif_read_reg(hwdev->hwif, addr);
+ val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);
+
+ dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) |
+ HINIC_DMA_ATTR_ENTRY_SET(at, AT) |
+ HINIC_DMA_ATTR_ENTRY_SET(ph, PH) |
+ HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
+ HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+ val |= dma_attr_entry;
+ hinic_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hinic_pcie_nosnoop no_snooping,
+ enum hinic_pcie_tph tph_en)
+{
+ struct hinic_vf_dma_attr_table attr = {0};
+ u16 out_size = sizeof(attr);
+ int err;
+
+ attr.func_idx = hinic_global_func_id(hwdev);
+
+ attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
+ attr.entry_idx = entry_idx;
+ attr.st = st;
+ attr.at = at;
+ attr.ph = ph;
+ attr.no_snooping = no_snooping;
+ attr.tph_en = tph_en;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_DMA_ATTR_SET, &attr,
+ sizeof(attr), &attr, &out_size, 0);
+ if (err || !out_size || attr.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set dma attribute, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, attr.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the device to configure
+ **/
+static int dma_attr_table_init(struct hinic_hwdev *hwdev)
+{
+ int err = 0;
+
+ /* TODO: check whether the PF dma attr can be set through uP, as for the VF */
+ if (HINIC_IS_VF(hwdev))
+ err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+ HINIC_PCIE_ST_DISABLE,
+ HINIC_PCIE_AT_DISABLE,
+ HINIC_PCIE_PH_DISABLE,
+ HINIC_PCIE_SNOOP,
+ HINIC_PCIE_TPH_DISABLE);
+ else
+ set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+ HINIC_PCIE_ST_DISABLE,
+ HINIC_PCIE_AT_DISABLE,
+ HINIC_PCIE_PH_DISABLE,
+ HINIC_PCIE_SNOOP,
+ HINIC_PCIE_TPH_DISABLE);
+
+ return err;
+}
+
+static int resources_state_set(struct hinic_hwdev *hwdev,
+ enum hinic_res_state state)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_cmd_set_res_state res_state = {0};
+ u16 out_size = sizeof(res_state);
+ int err;
+
+ res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ res_state.state = state;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_RES_STATE_SET,
+ &res_state, sizeof(res_state),
+ &res_state, &out_size, 0);
+ if (err || !out_size || res_state.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, res_state.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_sync_heartbeat_status(struct hinic_hwdev *hwdev,
+ enum heartbeat_support_state pf_state,
+ enum heartbeat_support_state *mgmt_state)
+{
+ struct hinic_heartbeat_support hb_support = {0};
+ u16 out_size = sizeof(hb_support);
+ int err;
+
+ hb_support.ppf_id = hinic_ppf_idx(hwdev);
+ hb_support.pf_issupport = pf_state;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED,
+ &hb_support, sizeof(hb_support),
+ &hb_support, &out_size, 0);
+ if ((hb_support.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ hb_support.status) || err || !out_size) {
+ sdk_err(hwdev->dev_hdl, "Failed to synchronize heartbeat status, err: %d,
status: 0x%x, out_size: 0x%x\n",
+ err, hb_support.status, out_size);
+ return -EFAULT;
+ }
+
+ if (!hb_support.status)
+ *mgmt_state = hb_support.mgmt_issupport;
+
+ return hb_support.status;
+}
+
+static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = pri_handle;
+ u8 cmd_idx;
+ u32 *mem;
+ u16 i;
+
+ for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) {
+ if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+ if (!pf_to_mgmt->proc.info[cmd_idx].proc) {
+ sdk_warn(pf_to_mgmt->hwdev->dev_hdl,
+ "PF recv up comm msg handle null, cmd(0x%x)\n",
+ cmd);
+ } else {
+ pf_to_mgmt->proc.info[cmd_idx].proc(hwdev,
+ buf_in, in_size, buf_out, out_size);
+ }
+
+ return;
+ }
+ }
+
+ sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n",
+ cmd);
+
+ mem = buf_in;
+ for (i = 0; i < (in_size / sizeof(u32)); i++) {
+ pr_info("0x%x\n", *mem);
+ mem++;
+ }
+
+ *out_size = 0;
+}
+
+static int hinic_vf_get_ppf_init_state(void *handle, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_ppf_state *ppf_state = buf_out;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ ppf_state->ppf_state = (u8)chip_node->ppf_state;
+
+ *out_size = sizeof(*ppf_state);
+
+ return 0;
+}
+
+int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode)
+{
+ struct hinic_sdi_mode_info sdi_mode = {0};
+ u16 out_size = sizeof(sdi_mode);
+ int err;
+
+ sdi_mode.opcode = HINIC_SDI_INFO_MODE & (~HINIC_SDI_INFO_SET);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_SDI_MODE, &sdi_mode,
+ sizeof(sdi_mode), &sdi_mode, &out_size, 0);
+ if ((sdi_mode.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ sdi_mode.status) || err || !out_size) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to get sdi mode info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, sdi_mode.status, out_size);
+ return -EFAULT;
+ }
+
+ *cur_mode = sdi_mode.cur_sdi_mode;
+
+ return sdi_mode.status;
+}
+
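+/* PF-side mailbox handler for common commands from VFs: validate the
+ * command, handle FLR and PPF state queries locally, and forward
+ * everything else to the mgmt cpu.
+ */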
+int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ int err = 0;
+ u8 size = ARRAY_SIZE(hw_cmd_support_vf);
+
+ if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
+ buf_in, in_size, size)) {
+ sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+ "PF Receive VF(%d) common cmd(0x%x), mbox len(0x%x) is invalid\n",
+ vf_id + hinic_glb_pf_vf_offset(handle), cmd, in_size);
+ err = HINIC_MBOX_VF_CMD_ERROR;
+ return err;
+ }
+
+ if (cmd == HINIC_MGMT_CMD_START_FLR) {
+ *out_size = 0;
+ err = hinic_vf_rx_tx_flush_in_pf(handle, vf_id);
+ } else if (cmd == HINIC_MGMT_CMD_GET_PPF_STATE) {
+ err = hinic_vf_get_ppf_init_state(handle, buf_out, out_size);
+ } else {
+ err = hinic_pf_msg_to_mgmt_sync(handle, HINIC_MOD_COMM, cmd,
+ buf_in, in_size, buf_out,
+ out_size, 0U);
+ if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+ "PF mbox common callback handler err: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
+{
+ struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} };
+ u16 num_aeqs, resp_num_irq = 0, i;
+ int err;
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+ if (num_aeqs > HINIC_MAX_AEQS) {
+ sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+ HINIC_MAX_AEQS);
+ num_aeqs = HINIC_MAX_AEQS;
+ }
+ err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs,
+ &resp_num_irq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n");
+ goto aeqs_init_err;
+ }
+
+ set_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+aeqs_init_err:
+ for (i = 0; i < num_aeqs; i++)
+ hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+
+ return err;
+}
+
+static void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
+{
+ struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} };
+ u16 num_irqs, i;
+
+ clear_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+ hinic_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs);
+ hinic_aeqs_free(hwdev);
+ for (i = 0; i < num_irqs; i++)
+ hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+}
+
+static int hinic_comm_ceqs_init(struct hinic_hwdev *hwdev)
+{
+ struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+ u16 num_ceqs, resp_num_irq = 0, i;
+ int err;
+
+ num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
+ if (num_ceqs > HINIC_MAX_CEQS) {
+ sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+ HINIC_MAX_CEQS);
+ num_ceqs = HINIC_MAX_CEQS;
+ }
+
+ err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs,
+ &resp_num_irq);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hinic_ceqs_init(hwdev, num_ceqs, ceq_irqs);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to init ceqs, err:%d\n", err);
+ goto ceqs_init_err;
+ }
+
+ return 0;
+
+ceqs_init_err:
+ for (i = 0; i < num_ceqs; i++)
+ hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+
+ return err;
+}
+
+static void hinic_comm_ceqs_free(struct hinic_hwdev *hwdev)
+{
+ struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+ u16 num_irqs;
+ int i;
+
+ hinic_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs);
+ hinic_ceqs_free(hwdev);
+ for (i = 0; i < num_irqs; i++)
+ hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+}
+
+static int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_func_to_func_init(hwdev);
+ if (err)
+ return err;
+
+ hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_FROM_FUNC,
+ hinic_mbox_func_aeqe_handler);
+ hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_SEND_RSLT,
+ hinic_mbox_self_aeqe_handler);
+
+ if (!HINIC_IS_VF(hwdev)) {
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_pf_mbox_handler);
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+ sw_func_pf_mbox_handler);
+ }
+
+ set_bit(HINIC_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_FROM_FUNC);
+ hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_SEND_RSLT);
+
+ hinic_func_to_func_free(hwdev);
+}
+
+static int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return 0; /* VFs do not send msgs to mgmt directly */
+
+ err = hinic_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ hinic_aeq_register_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU,
+ hinic_mgmt_msg_aeqe_handler);
+
+ hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_COMM,
+ hwdev->pf_to_mgmt, comm_mgmt_msg_handler);
+
+ set_bit(HINIC_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ if (hinic_func_type(hwdev) == TYPE_VF &&
+ !FUNC_SUPPORT_MGMT(hwdev))
+ return; /* VF do not support send msg to mgmt directly */
+
+ hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_COMM);
+
+ hinic_aeq_unregister_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU);
+
+ hinic_pf_to_mgmt_free(hwdev);
+}
+
+static int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_cmdqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hinic_ceq_register_cb(hwdev, HINIC_CMDQ, hinic_cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n");
+ goto set_cmdq_depth_err;
+ }
+
+ return 0;
+
+set_cmdq_depth_err:
+ hinic_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+ hinic_ceq_unregister_cb(hwdev, HINIC_CMDQ);
+ hinic_cmdqs_free(hwdev);
+}
+
+static inline void __set_heartbeat_ehd_detect_delay(struct hinic_hwdev *hwdev,
+ u32 delay_ms)
+{
+ hwdev->heartbeat_ehd.start_detect_jiffies =
+ jiffies + msecs_to_jiffies(delay_ms);
+}
+
+static int hinic_sync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+
+ err = resources_state_set(hwdev, HINIC_RES_ACTIVE);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to set function resources state\n");
+ goto resources_state_set_err;
+ }
+
+ hwdev->heartbeat_ehd.en = false;
+ if (HINIC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+ /* heartbeat synchronization must happen after the pf active status is set */
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_HEARTBEAT_EVENT,
+ mgmt_heartbeat_event_handler);
+ }
+
+ return 0;
+
+resources_state_set_err:
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+ return err;
+}
+
+static void hinic_unsync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+ hwdev->heartbeat_ehd.en = false;
+ if (HINIC_FUNC_TYPE(hwdev) == TYPE_PPF) {
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_HEARTBEAT_EVENT);
+ }
+
+ resources_state_set(hwdev, HINIC_RES_CLEAN);
+}
+
+int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag)
+{
+ struct hinic_l2nic_reset l2nic_reset = {0};
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(l2nic_reset);
+ int err = 0;
+
+ err = hinic_set_vport_enable(hwdev, false);
+ if (err)
+ return err;
+
+ msleep(100);
+
+ sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag);
+ l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif);
+ l2nic_reset.reset_flag = reset_flag;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, 0);
+ if (err || !out_size || l2nic_reset.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status:
0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_l2nic_reset_base);
+
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ return hinic_l2nic_reset_base(hwdev, 0);
+}
+
+static int __get_func_misc_info(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_get_board_info(hwdev, &hwdev->board_info);
+ if (err) {
+ /* VFs can't get board info in early firmware versions */
+ if (!HINIC_IS_VF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Get board info failed\n");
+ return err;
+ }
+ }
+
+ err = hinic_get_mgmt_version(hwdev, hwdev->mgmt_ver);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Get mgmt cpu version failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __init_eqs_msix_attr(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* initialize communication channel */
+int hinic_init_comm_ch(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (IS_BMGW_SLAVE_HOST(hwdev) &&
+ (!get_master_host_mbox_enable(hwdev))) {
+ sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
+ return -EFAULT;
+ }
+
+ err = hinic_comm_aeqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n");
+ return err;
+ }
+
+ err = hinic_comm_pf_to_mgmt_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init msg\n");
+ goto msg_init_err;
+ }
+
+ err = hinic_comm_func_to_func_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n");
+ goto func_to_func_init_err;
+ }
+
+ err = __get_func_misc_info(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to get function msic information\n");
+ goto get_func_info_err;
+ }
+
+ /* detect master host chip mode according to board type and host id */
+ err = rectify_host_mode(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to rectify host mode\n");
+ goto rectify_mode_err;
+ }
+
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto l2nic_reset_err;
+
+ if (IS_MULTI_HOST(hwdev)) {
+ err = hinic_multi_host_mgmt_init(hwdev);
+ if (err)
+ goto multi_host_mgmt_init_err;
+ }
+
+ dma_attr_table_init(hwdev);
+
+ err = hinic_comm_ceqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n");
+ goto ceqs_init_err;
+ }
+
+ err = __init_eqs_msix_attr(hwdev);
+ if (err)
+ goto init_eqs_msix_err;
+
+ /* set default wq page_size */
+ hwdev->wq_page_size = HINIC_DEFAULT_WQ_PAGE_SIZE;
+ err = hinic_set_wq_page_size(hwdev, hinic_global_func_id(hwdev),
+ hwdev->wq_page_size);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n");
+ goto init_wq_pg_size_err;
+ }
+
+ err = hinic_comm_cmdqs_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+ goto cmdq_init_err;
+ }
+
+ set_bit(HINIC_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ err = hinic_sync_mgmt_func_state(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n");
+ goto sync_mgmt_func_err;
+ }
+
+ err = hinic_aeq_register_swe_cb(hwdev, HINIC_STATELESS_EVENT,
+ hinic_nic_sw_aeqe_handler);
+ if (err) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed to register ucode aeqe handler\n");
+ goto register_ucode_aeqe_err;
+ }
+
+ set_bit(HINIC_HWDEV_COMM_CH_INITED, &hwdev->func_state);
+
+ return 0;
+
+register_ucode_aeqe_err:
+ hinic_unsync_mgmt_func_state(hwdev);
+sync_mgmt_func_err:
+ return err;
+
+cmdq_init_err:
+ if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF)
+ hinic_set_wq_page_size(hwdev, hinic_global_func_id(hwdev),
+ HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
+init_eqs_msix_err:
+ hinic_comm_ceqs_free(hwdev);
+
+ceqs_init_err:
+ if (IS_MULTI_HOST(hwdev))
+ hinic_multi_host_mgmt_free(hwdev);
+multi_host_mgmt_init_err:
+l2nic_reset_err:
+rectify_mode_err:
+get_func_info_err:
+func_to_func_init_err:
+ return err;
+
+msg_init_err:
+ hinic_comm_aeqs_free(hwdev);
+
+ return err;
+}
+
+static void __uninit_comm_module(struct hinic_hwdev *hwdev,
+ enum hinic_hwdev_init_state init_state)
+{
+ switch (init_state) {
+ case HINIC_HWDEV_COMM_CH_INITED:
+ hinic_aeq_unregister_swe_cb(hwdev,
+ HINIC_STATELESS_EVENT);
+ hinic_unsync_mgmt_func_state(hwdev);
+ break;
+ case HINIC_HWDEV_CMDQ_INITED:
+ hinic_comm_cmdqs_free(hwdev);
+ /* A VF may only use a wq page size of 256K; the pf rejects any
+ * other value. The pf resets all vf page sizes to 4K when sriov
+ * is disabled.
+ */
+ if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF)
+ hinic_set_wq_page_size(hwdev,
+ hinic_global_func_id(hwdev),
+ HINIC_HW_WQ_PAGE_SIZE);
+ hinic_comm_ceqs_free(hwdev);
+
+ if (IS_MULTI_HOST(hwdev))
+ hinic_multi_host_mgmt_free(hwdev);
+ break;
+ case HINIC_HWDEV_MBOX_INITED:
+ hinic_comm_func_to_func_free(hwdev);
+ break;
+ case HINIC_HWDEV_MGMT_INITED:
+ hinic_comm_pf_to_mgmt_free(hwdev);
+ break;
+ case HINIC_HWDEV_AEQ_INITED:
+ hinic_comm_aeqs_free(hwdev);
+ break;
+ default:
+ break;
+ }
+}
+
+#define HINIC_FUNC_STATE_BUSY_TIMEOUT 300
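+/* Tear down the communication channel modules in the reverse order of
+ * initialization, waiting for any in-progress operation to finish
+ * before freeing each module.
+ */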
+void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev)
+{
+ enum hinic_hwdev_init_state init_state = HINIC_HWDEV_COMM_CH_INITED;
+ int cnt;
+
+ while (init_state > HINIC_HWDEV_NONE_INITED) {
+ if (!test_bit(init_state, &hwdev->func_state)) {
+ init_state--;
+ continue;
+ }
+ clear_bit(init_state, &hwdev->func_state);
+
+ cnt = 0;
+ while (test_bit(HINIC_HWDEV_STATE_BUSY, &hwdev->func_state) &&
+ cnt++ <= HINIC_FUNC_STATE_BUSY_TIMEOUT)
+ usleep_range(900, 1000);
+
+ __uninit_comm_module(hwdev, init_state);
+
+ init_state--;
+ }
+}
+
+int hinic_slq_init(void *dev, int num_wqs)
+{
+ struct hinic_hwdev *hwdev = dev;
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL);
+ if (!hwdev->wqs)
+ return -ENOMEM;
+
+ err = hinic_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n");
+ kfree(hwdev->wqs);
+ hwdev->wqs = NULL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(hinic_slq_init);
+
+void hinic_slq_uninit(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!hwdev)
+ return;
+
+ hinic_wqs_free(hwdev->wqs);
+
+ kfree(hwdev->wqs);
+}
+EXPORT_SYMBOL(hinic_slq_uninit);
+
+int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size,
+ u64 *cla_addr, void **handle)
+{
+ struct hinic_hwdev *hwdev = dev;
+ struct hinic_wq *wq;
+ int err;
+
+ if (!dev || !cla_addr || !handle)
+ return -EINVAL;
+
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return -ENOMEM;
+
+ err = hinic_wq_allocate(hwdev->wqs, wq, wqebb_size, hwdev->wq_page_size,
+ q_depth, 0);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n");
+ kfree(wq);
+ return -EFAULT;
+ }
+
+ *cla_addr = wq->block_paddr;
+ *handle = wq;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_slq_alloc);
+
+void hinic_slq_free(void *dev, void *handle)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!hwdev || !handle)
+ return;
+
+ hinic_wq_free(hwdev->wqs, handle);
+ kfree(handle);
+}
+EXPORT_SYMBOL(hinic_slq_free);
+
+u64 hinic_slq_get_addr(void *handle, u16 index)
+{
+ if (!handle)
+ return 0; /* NULL wqe address */
+
+ return (u64)hinic_get_wqebb_addr(handle, index);
+}
+EXPORT_SYMBOL(hinic_slq_get_addr);
+
+u64 hinic_slq_get_first_pageaddr(void *handle)
+{
+ struct hinic_wq *wq = handle;
+
+ if (!handle)
+ return 0; /* NULL wqe address */
+
+ return hinic_get_first_wqe_page_addr(wq);
+}
+EXPORT_SYMBOL(hinic_slq_get_first_pageaddr);
+
+int hinic_func_tmr_bitmap_set(void *hwdev, bool en)
+{
+ struct hinic_func_tmr_bitmap_op bitmap_op = {0};
+ u16 out_size = sizeof(bitmap_op);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ bitmap_op.func_idx = hinic_global_func_id(hwdev);
+ bitmap_op.ppf_idx = hinic_ppf_idx(hwdev);
+ if (en)
+ bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE;
+ else
+ bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET,
+ &bitmap_op, sizeof(bitmap_op),
+ &bitmap_op, &out_size, 0);
+ if (err || !out_size || bitmap_op.status) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, bitmap_op.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_func_tmr_bitmap_set);
+
+int ppf_ht_gpa_set(struct hinic_hwdev *hwdev, struct hinic_page_addr *pg0,
+ struct hinic_page_addr *pg1)
+{
+ struct comm_info_ht_gpa_set ht_gpa_set = {0};
+ u16 out_size = sizeof(ht_gpa_set);
+ int ret;
+
+ pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl,
+ HINIC_HT_GPA_PAGE_SIZE,
+ &pg0->phys_addr, GFP_KERNEL);
+ if (!pg0->virt_addr) {
+ sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n");
+ return -EFAULT;
+ }
+
+ pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl,
+ HINIC_HT_GPA_PAGE_SIZE,
+ &pg1->phys_addr, GFP_KERNEL);
+ if (!pg1->virt_addr) {
+ sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n");
+ return -EFAULT;
+ }
+
+ ht_gpa_set.page_pa0 = pg0->phys_addr;
+ ht_gpa_set.page_pa1 = pg1->phys_addr;
+ sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx,
page_addr1.pa=0x%llx\n",
+ pg0->phys_addr, pg1->phys_addr);
+ ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PPF_HT_GPA_SET,
+ &ht_gpa_set, sizeof(ht_gpa_set),
+ &ht_gpa_set, &out_size, 0);
+ if (ret || !out_size || ht_gpa_set.status) {
+ sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x,
out_size: 0x%x\n",
+ ret, ht_gpa_set.status, out_size);
+ return -EFAULT;
+ }
+
+ hwdev->page_pa0.phys_addr = pg0->phys_addr;
+ hwdev->page_pa0.virt_addr = pg0->virt_addr;
+
+ hwdev->page_pa1.phys_addr = pg1->phys_addr;
+ hwdev->page_pa1.virt_addr = pg1->virt_addr;
+
+ return 0;
+}
+
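+/* Allocate the two hash-table gpa pages and report them to the
+ * firmware, retrying with fresh pages on failure; pages from failed
+ * attempts are freed before returning.
+ */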
+int hinic_ppf_ht_gpa_init(struct hinic_hwdev *hwdev)
+{
+ int ret;
+ int i;
+ int j;
+ int size;
+
+ struct hinic_page_addr page_addr0[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+ struct hinic_page_addr page_addr1[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+
+ size = HINIC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]);
+ memset(page_addr0, 0, size);
+ memset(page_addr1, 0, size);
+
+ for (i = 0; i < HINIC_PPF_HT_GPA_SET_RETRY_TIMES; i++) {
+ ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]);
+ if (!ret)
+ break;
+ }
+
+ for (j = 0; j < i; j++) {
+ if (page_addr0[j].virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl,
+ HINIC_HT_GPA_PAGE_SIZE,
+ page_addr0[j].virt_addr,
+ page_addr0[j].phys_addr);
+ page_addr0[j].virt_addr = NULL;
+ }
+ if (page_addr1[j].virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl,
+ HINIC_HT_GPA_PAGE_SIZE,
+ page_addr1[j].virt_addr,
+ page_addr1[j].phys_addr);
+ page_addr1[j].virt_addr = NULL;
+ }
+ }
+
+ if (i >= HINIC_PPF_HT_GPA_SET_RETRY_TIMES) {
+ sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n",
+ i);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hinic_ppf_ht_gpa_deinit(struct hinic_hwdev *hwdev)
+{
+ if (hwdev->page_pa0.virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE,
+ hwdev->page_pa0.virt_addr,
+ hwdev->page_pa0.phys_addr);
+ hwdev->page_pa0.virt_addr = NULL;
+ }
+
+ if (hwdev->page_pa1.virt_addr) {
+ dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE,
+ hwdev->page_pa1.virt_addr,
+ hwdev->page_pa1.phys_addr);
+ hwdev->page_pa1.virt_addr = NULL;
+ }
+}
+
+static int set_ppf_tmr_status(struct hinic_hwdev *hwdev,
+ enum ppf_tmr_status status)
+{
+ struct hinic_ppf_tmr_op op = {0};
+ u16 out_size = sizeof(op);
+ int err = 0;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) != TYPE_PPF)
+ return -EFAULT;
+
+ if (status == HINIC_PPF_TMR_FLAG_START) {
+ err = hinic_ppf_ht_gpa_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n");
+ return -EFAULT;
+ }
+ } else {
+ hinic_ppf_ht_gpa_deinit(hwdev);
+ }
+
+ op.op_id = status;
+ op.ppf_idx = hinic_ppf_idx(hwdev);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PPF_TMR_SET, &op,
+ sizeof(op), &op, &out_size, 0);
+ if (err || !out_size || op.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, op.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_ppf_tmr_start(void *hwdev)
+{
+ if (!hwdev) {
+ pr_err("Hwdev pointer is NULL for starting ppf timer\n");
+ return -EINVAL;
+ }
+
+ return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_start);
+
+int hinic_ppf_tmr_stop(void *hwdev)
+{
+ if (!hwdev) {
+ pr_err("Hwdev pointer is NULL for stop ppf timer\n");
+ return -EINVAL;
+ }
+
+ return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_stop);
+
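+/* Try to allocate page_num DMA-coherent pages of page_size bytes for
+ * the mqm search table. On failure, free whatever was allocated and
+ * return an error so the caller can retry with a smaller page size.
+ */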
+int mqm_eqm_try_alloc_mem(struct hinic_hwdev *hwdev, u32 page_size,
+ u32 page_num)
+{
+ struct hinic_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr;
+ u32 valid_num = 0;
+ u32 flag = 1;
+ u32 i = 0;
+
+ for (i = 0; i < page_num; i++) {
+ page_addr->virt_addr =
+ dma_zalloc_coherent(hwdev->dev_hdl, page_size,
+ &page_addr->phys_addr, GFP_KERNEL);
+ if (!page_addr->virt_addr) {
+ flag = 0;
+ break;
+ }
+ valid_num++;
+ page_addr++;
+ }
+
+ if (flag == 1) {
+ hwdev->mqm_att.page_size = page_size;
+ hwdev->mqm_att.page_num = page_num;
+ } else {
+ page_addr = hwdev->mqm_att.brm_srch_page_addr;
+ for (i = 0; i < valid_num; i++) {
+ dma_free_coherent(hwdev->dev_hdl, page_size,
+ page_addr->virt_addr,
+ page_addr->phys_addr);
+ page_addr++;
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mqm_eqm_alloc_page_mem(struct hinic_hwdev *hwdev)
+{
+ int ret = 0;
+
+ /* try 64KB pages first; chunk_num/16 pages are needed */
+ ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024,
+ hwdev->mqm_att.chunk_num >> 4);
+ if (!ret)
+ return 0;
+
+ /* fall back to 8KB pages; chunk_num/2 pages are needed */
+ ret = mqm_eqm_try_alloc_mem(hwdev, 8 * 1024,
+ hwdev->mqm_att.chunk_num >> 1);
+ if (!ret)
+ return 0;
+
+ /* finally try 4KB pages; chunk_num pages are needed */
+ ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024,
+ hwdev->mqm_att.chunk_num);
+ if (!ret)
+ return 0;
+
+ return ret;
+}
+
+void mqm_eqm_free_page_mem(struct hinic_hwdev *hwdev)
+{
+ u32 i;
+ struct hinic_page_addr *page_addr;
+ u32 page_size;
+
+ page_size = hwdev->mqm_att.page_size;
+ page_addr = hwdev->mqm_att.brm_srch_page_addr;
+
+ for (i = 0; i < hwdev->mqm_att.page_num; i++) {
+ dma_free_coherent(hwdev->dev_hdl, page_size,
+ page_addr->virt_addr, page_addr->phys_addr);
+ page_addr++;
+ }
+}
+
+int mqm_eqm_set_cfg_2_hw(struct hinic_hwdev *hwdev, u32 valid)
+{
+ struct comm_info_eqm_cfg info_eqm_cfg = {0};
+ u16 out_size = sizeof(info_eqm_cfg);
+ int err;
+
+ info_eqm_cfg.page_size = hwdev->mqm_att.page_size;
+ info_eqm_cfg.ppf_id = hwdev->hwif->attr.func_global_idx;
+ info_eqm_cfg.valid = valid;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MQM_CFG_INFO_SET,
+ &info_eqm_cfg, sizeof(info_eqm_cfg),
+ &info_eqm_cfg, &out_size, 0);
+ if (err || !out_size || info_eqm_cfg.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, info_eqm_cfg.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define EQM_DATA_BUF_SIZE 1024
+
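+/* Report the search-page gpas to the firmware in batches of 128 entries
+ * (bits 63:12 of each physical address), followed by one final message
+ * for any remainder.
+ */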
+int mqm_eqm_set_page_2_hw(struct hinic_hwdev *hwdev)
+{
+ struct comm_info_eqm_search_gpa *info;
+ struct hinic_page_addr *page_addr;
+ void *send_buf;
+ u16 send_buf_size;
+ u32 i;
+ u64 *gpa_hi52;
+ u64 gpa;
+ u32 num;
+ u32 start_idx;
+ int err = 0;
+ u32 valid_page_num;
+ u16 out_size;
+
+ send_buf_size = sizeof(struct comm_info_eqm_search_gpa) +
+ EQM_DATA_BUF_SIZE;
+ send_buf = kzalloc(send_buf_size, GFP_KERNEL);
+ if (!send_buf) {
+ sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n");
+ return -EFAULT;
+ }
+
+ page_addr = hwdev->mqm_att.brm_srch_page_addr;
+ info = (struct comm_info_eqm_search_gpa *)send_buf;
+ valid_page_num = 0;
+
+ gpa_hi52 = info->gpa_hi52;
+ num = 0;
+ start_idx = 0;
+ for (i = 0; i < hwdev->mqm_att.page_num; i++) {
+ gpa = page_addr->phys_addr >> 12;
+ gpa_hi52[num] = gpa;
+ num++;
+ if (num == 128) {
+ info->num = num;
+ info->start_idx = start_idx;
+ out_size = send_buf_size;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_MQM_SRCH_GPA_SET,
+ info, (u16)send_buf_size,
+ info, &out_size, 0);
+ if (err || !out_size || info->status) {
+ sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, info->status, out_size);
+ err = -EFAULT;
+ goto set_page_2_hw_end;
+ }
+
+ gpa_hi52 = info->gpa_hi52;
+ num = 0;
+ start_idx = i + 1;
+ }
+ page_addr++;
+ valid_page_num++;
+ }
+
+ if (0 != (valid_page_num & 0x7f)) {
+ info->num = num;
+ info->start_idx = start_idx;
+ out_size = send_buf_size;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_MQM_SRCH_GPA_SET,
+ info, (u16)send_buf_size,
+ info, &out_size, 0);
+ if (err || !out_size || info->status) {
+ sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x,
out_size: 0x%x\n",
+ err, info->status, out_size);
+ err = -EFAULT;
+ goto set_page_2_hw_end;
+ }
+ }
+
+set_page_2_hw_end:
+ kfree(send_buf);
+ return err;
+}
+
+int mqm_eqm_init(struct hinic_hwdev *hwdev)
+{
+ struct comm_info_eqm_fix info_eqm_fix = {0};
+ u16 len = sizeof(info_eqm_fix);
+ int ret;
+
+ if (hwdev->hwif->attr.func_type != TYPE_PPF)
+ return 0;
+
+ ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MQM_FIX_INFO_GET,
+ &info_eqm_fix, sizeof(info_eqm_fix),
+ &info_eqm_fix, &len, 0);
+ if (ret || !len || info_eqm_fix.status) {
+ sdk_err(hwdev->dev_hdl, "Get mqm fix info fail,err: %d, status: 0x%x, out_size:
0x%x\n",
+ ret, info_eqm_fix.status, len);
+ return -EFAULT;
+ }
+ if (!(info_eqm_fix.chunk_num))
+ return 0;
+
+ hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num;
+ hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num;
+ hwdev->mqm_att.page_size = 0;
+ hwdev->mqm_att.page_num = 0;
+
+ hwdev->mqm_att.brm_srch_page_addr =
+ kcalloc(hwdev->mqm_att.chunk_num,
+ sizeof(struct hinic_page_addr), GFP_KERNEL);
+ if (!(hwdev->mqm_att.brm_srch_page_addr)) {
+ sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n");
+ return -EFAULT;
+ }
+
+ ret = mqm_eqm_alloc_page_mem(hwdev);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\r\n");
+ goto err_page;
+ }
+
+ ret = mqm_eqm_set_page_2_hw(hwdev);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n");
+ goto err_ecmd;
+ }
+
+ ret = mqm_eqm_set_cfg_2_hw(hwdev, 1);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n");
+ goto err_ecmd;
+ }
+
+ return 0;
+
+err_ecmd:
+ mqm_eqm_free_page_mem(hwdev);
+
+err_page:
+ kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+ return ret;
+}
+
+void mqm_eqm_deinit(struct hinic_hwdev *hwdev)
+{
+ int ret;
+
+ if (hwdev->hwif->attr.func_type != TYPE_PPF)
+ return;
+
+ if (!(hwdev->mqm_att.chunk_num))
+ return;
+
+ mqm_eqm_free_page_mem(hwdev);
+ kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+ ret = mqm_eqm_set_cfg_2_hw(hwdev, 0);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! err: %d\n",
+ ret);
+ return;
+ }
+
+ hwdev->mqm_att.chunk_num = 0;
+ hwdev->mqm_att.search_gpa_num = 0;
+ hwdev->mqm_att.page_num = 0;
+ hwdev->mqm_att.page_size = 0;
+}
+
+int hinic_ppf_ext_db_init(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ret = mqm_eqm_init(hwdev);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_init);
+
+int hinic_ppf_ext_db_deinit(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (hwdev->hwif->attr.func_type != TYPE_PPF)
+ return -EFAULT;
+
+ mqm_eqm_deinit(hwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_deinit);
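+
+/* Pairing sketch for the two exported entry points above (illustrative
+ * only; the surrounding service-module code is hypothetical):
+ *
+ * if (hinic_ppf_ext_db_init(hwdev))
+ * goto roll_back;
+ * ...run stateful services that need the external doorbell...
+ * hinic_ppf_ext_db_deinit(hwdev);
+ *
+ * On non-PPF functions init is a no-op returning 0, while deinit is
+ * rejected with -EFAULT.
+ */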
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ int err;
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = hinic_ppf_idx(hwdev);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PAGESIZE_SET,
+ &page_size_info, sizeof(page_size_info),
+ &page_size_info, &out_size, 0);
+ if (err || !out_size || page_size_info.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x,
out_size: 0x%0x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
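+
+/* hinic_msg_to_mgmt_sync() is used with the same in/out idiom throughout
+ * this file: the request structure doubles as the response buffer, out_size
+ * carries the buffer size in and the actual response length out, and a
+ * command is treated as successful only when err == 0, out_size != 0 and
+ * the status byte embedded in the response is 0.
+ */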
+
+enum hinic_event_cmd {
+ /* hilink event */
+ HINIC_EVENT_LINK_STATUS_CHANGE = 1,
+ HINIC_EVENT_LINK_ERR,
+ HINIC_EVENT_CABLE_PLUG,
+ HINIC_EVENT_HILINK_INFO,
+ /* reserved for hilink */
+
+ /* driver event, pf & vf communicate */
+ HINIC_EVENT_HEARTBEAT_LOST = 31,
+ HINIC_EVENT_SET_VF_COS,
+
+ /* mgmt event */
+ HINIC_EVENT_MGMT_FAULT = 61,
+ HINIC_EVENT_MGMT_WATCHDOG,
+ HINIC_EVENT_MGMT_FMW_ACT_NTC,
+ HINIC_EVENT_MGMT_RESET,
+ HINIC_EVENT_MGMT_PCIE_DFX,
+ HINIC_EVENT_MCTP_HOST_INFO,
+ HINIC_EVENT_MGMT_HEARTBEAT_EHD,
+
+ HINIC_EVENT_MAX_TYPE,
+};
+
+struct hinic_event_convert {
+ u8 mod;
+ u8 cmd;
+
+ enum hinic_event_cmd event;
+};
+
+static struct hinic_event_convert __event_convert[] = {
+ /* hilink event */
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ .event = HINIC_EVENT_LINK_STATUS_CHANGE,
+ },
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_PORT_CMD_LINK_ERR_EVENT,
+ .event = HINIC_EVENT_LINK_ERR,
+ },
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_PORT_CMD_CABLE_PLUG_EVENT,
+ .event = HINIC_EVENT_CABLE_PLUG,
+ },
+ {
+ .mod = HINIC_MOD_HILINK,
+ .cmd = HINIC_HILINK_CMD_GET_LINK_INFO,
+ .event = HINIC_EVENT_HILINK_INFO,
+ },
+
+ /* driver triggered event */
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_MGMT_CMD_HEART_LOST_REPORT,
+ .event = HINIC_EVENT_HEARTBEAT_LOST,
+ },
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_PORT_CMD_SET_VF_COS,
+ .event = HINIC_EVENT_SET_VF_COS,
+ },
+
+ /* mgmt event */
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_CMD_FAULT_REPORT,
+ .event = HINIC_EVENT_MGMT_FAULT,
+ },
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_CMD_WATCHDOG_INFO,
+ .event = HINIC_EVENT_MGMT_WATCHDOG,
+ },
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_CMD_FMW_ACT_NTC,
+ .event = HINIC_EVENT_MGMT_FMW_ACT_NTC,
+ },
+ {
+ .mod = HINIC_MOD_L2NIC,
+ .cmd = HINIC_PORT_CMD_MGMT_RESET,
+ .event = HINIC_EVENT_MGMT_RESET,
+ },
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_CMD_PCIE_DFX_NTC,
+ .event = HINIC_EVENT_MGMT_PCIE_DFX,
+ },
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_CMD_GET_HOST_INFO,
+ .event = HINIC_EVENT_MCTP_HOST_INFO,
+ },
+ {
+ .mod = HINIC_MOD_COMM,
+ .cmd = HINIC_MGMT_HEARTBEAT_EVENT,
+ .event = HINIC_EVENT_MGMT_HEARTBEAT_EHD,
+ },
+};
+
+static enum hinic_event_cmd __get_event_type(u8 mod, u8 cmd)
+{
+ int idx;
+ int arr_size = ARRAY_SIZE(__event_convert);
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (__event_convert[idx].mod == mod &&
+ __event_convert[idx].cmd == cmd)
+ return __event_convert[idx].event;
+ }
+
+ return HINIC_EVENT_MAX_TYPE;
+}
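+
+/* Lookup example for the table above: __get_event_type(HINIC_MOD_L2NIC,
+ * HINIC_PORT_CMD_LINK_STATUS_REPORT) yields HINIC_EVENT_LINK_STATUS_CHANGE,
+ * while any (mod, cmd) pair missing from __event_convert[] falls through to
+ * HINIC_EVENT_MAX_TYPE, which the callers below treat as an unsupported
+ * event.
+ */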
+
+bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd)
+{
+ if ((mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_GET_HOST_INFO) ||
+ (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_HEARTBEAT_EVENT))
+ return false;
+
+ if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC ||
+ mod == HINIC_MOD_HILINK)
+ return true;
+
+ return false;
+}
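+
+/* Events from the COMM, L2NIC and HILINK modules are acknowledged to the
+ * management CPU before being processed ("ack first"); the MCTP host-info
+ * query and the enhanced heartbeat are excluded because their responses
+ * carry payload that the handlers below must fill into buf_out first.
+ */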
+
+#define FAULT_SHOW_STR_LEN 16
+static void fault_report_show(struct hinic_hwdev *hwdev,
+ struct hinic_fault_event *event)
+{
+ char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "chip", "ucode", "mem rd timeout", "mem wr
timeout",
+ "reg rd timeout", "reg wr timeout", "phy fault"};
+ char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "fatal", "reset", "flr", "general",
"suggestion"};
+ char type_str[FAULT_SHOW_STR_LEN + 1];
+ char level_str[FAULT_SHOW_STR_LEN + 1];
+ u8 level;
+ u32 pos, base;
+ struct hinic_fault_event_stats *fault;
+ u8 node_id;
+
+ sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d.\n",
+ hinic_global_func_id(hwdev));
+
+ memset(type_str, 0, FAULT_SHOW_STR_LEN + 1);
+ if (event->type < FAULT_TYPE_MAX)
+ strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
+ else
+ strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN);
+
+ sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type,
type_str);
+ sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x,
val[3]: 0x%08x\n",
+ event->event.val[0], event->event.val[1], event->event.val[2],
+ event->event.val[3]);
+
+ fault = &hwdev->hw_stats.fault_event_stats;
+
+ switch (event->type) {
+ case FAULT_TYPE_CHIP:
+ memset(level_str, 0, FAULT_SHOW_STR_LEN + 1);
+ level = event->event.chip.err_level;
+ if (level < FAULT_LEVEL_MAX)
+ strncpy(level_str, fault_level[level],
+ FAULT_SHOW_STR_LEN);
+ else
+ strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN);
+
+ if (level == FAULT_LEVEL_SERIOUS_FLR) {
+ sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n",
+ level, level_str, event->event.chip.func_id);
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ }
+ sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s],
err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n",
+ event->event.chip.node_id,
+ event->event.chip.err_type, level, level_str,
+ event->event.chip.err_csr_addr,
+ event->event.chip.err_csr_value);
+
+ node_id = event->event.chip.node_id;
+ atomic_inc(&fault->chip_fault_stats[node_id][level]);
+
+ base = event->event.chip.node_id * FAULT_LEVEL_MAX *
+ HINIC_CHIP_ERROR_TYPE_MAX;
+ pos = base + HINIC_CHIP_ERROR_TYPE_MAX * level +
+ event->event.chip.err_type;
+ if (pos < HINIC_CHIP_FAULT_SIZE)
+ hwdev->chip_fault_stats[pos]++;
+ break;
+ case FAULT_TYPE_UCODE:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+
+ sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc:
0x%08x\n",
+ event->event.ucode.cause_id, event->event.ucode.core_id,
+ event->event.ucode.c_id, event->event.ucode.epc);
+ break;
+ case FAULT_TYPE_MEM_RD_TIMEOUT:
+ case FAULT_TYPE_MEM_WR_TIMEOUT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+
+ sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab:
0x%08x, mem_index: 0x%08x\n",
+ event->event.mem_timeout.err_csr_ctrl,
+ event->event.mem_timeout.err_csr_data,
+ event->event.mem_timeout.ctrl_tab,
+ event->event.mem_timeout.mem_index);
+ break;
+ case FAULT_TYPE_REG_RD_TIMEOUT:
+ case FAULT_TYPE_REG_WR_TIMEOUT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n",
+ event->event.reg_timeout.err_csr);
+ break;
+ case FAULT_TYPE_PHY_FAULT:
+ atomic_inc(&fault->fault_type_stat[event->type]);
+ sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr:
0x%08x, op_data: 0x%08x\n",
+ event->event.phy_fault.op_type,
+ event->event.phy_fault.port_id,
+ event->event.phy_fault.dev_ad,
+ event->event.phy_fault.csr_addr,
+ event->event.phy_fault.op_data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hinic_refresh_history_fault(struct hinic_hwdev *hwdev,
+ struct hinic_fault_recover_info *info)
+{
+ if (!hwdev->history_fault_flag) {
+ hwdev->history_fault_flag = true;
+ memcpy(&hwdev->history_fault, info,
+ sizeof(struct hinic_fault_recover_info));
+ } else {
+ if (hwdev->history_fault.fault_lev >= info->fault_lev)
+ memcpy(&hwdev->history_fault, info,
+ sizeof(struct hinic_fault_recover_info));
+ }
+}
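+
+/* Retention rule of the helper above: the recorded history fault is only
+ * replaced when the new fault is at least as severe, since a smaller
+ * fault_lev value means higher severity (FAULT_LEVEL_FATAL is the lowest).
+ * A "general"-level record is therefore overwritten by a later "flr"-level
+ * fault, but not the other way around.
+ */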
+
+static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_fault_event *fault_event;
+ struct hinic_event_info event_info;
+ struct hinic_fault_info_node *fault_node;
+
+ if (in_size != sizeof(*fault_event)) {
+ sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be
%ld.\n",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ fault_event = buf_in;
+ fault_report_show(hwdev, &fault_event->event);
+
+ if (hwdev->event_callback) {
+ event_info.type = HINIC_EVENT_FAULT;
+ memcpy(&event_info.info, &fault_event->event,
+ sizeof(event_info.info));
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT)
+ fault_node->info.fault_src = fault_event->event.type;
+ else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT)
+ fault_node->info.fault_src = HINIC_FAULT_SRC_HW_PHY_FAULT;
+
+ if (fault_node->info.fault_src == HINIC_FAULT_SRC_HW_MGMT_CHIP)
+ fault_node->info.fault_lev =
+ fault_event->event.event.chip.err_level;
+ else
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+
+ memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event,
+ sizeof(union hinic_fault_hw_mgmt));
+ hinic_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+
+ queue_work(hwdev->workq, &hwdev->fault_work);
+}
+
+static void heartbeat_lost_event_handler(struct hinic_hwdev *hwdev)
+{
+ struct hinic_fault_info_node *fault_node;
+ struct hinic_event_info event_info = {0};
+
+ atomic_inc(&hwdev->hw_stats.heart_lost_stats);
+ sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n",
+ hinic_global_func_id(hwdev));
+
+ if (hwdev->event_callback) {
+ event_info.type = HINIC_EVENT_HEART_LOST;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST;
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+ hinic_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+
+ queue_work(hwdev->workq, &hwdev->fault_work);
+}
+
+static void link_status_event_handler(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_status *link_status, *ret_link_status;
+ struct hinic_event_info event_info = {0};
+ struct hinic_event_link_info *link_info = &event_info.link_info;
+ struct nic_port_info port_info = {0};
+ int err;
+
+ /* Ignore link change event */
+ if (FUNC_FORCE_LINK_UP(hwdev))
+ return;
+
+ link_status = buf_in;
+ sdk_info(hwdev->dev_hdl, "Link status report received, func_id: %d, status:
%d\n",
+ hinic_global_func_id(hwdev), link_status->link);
+
+ if (link_status->link)
+ atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats);
+ else
+ atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats);
+
+ /* link event reported only after set vport enable */
+ if (hinic_func_type(hwdev) != TYPE_VF &&
+ link_status->link == HINIC_EVENT_LINK_UP) {
+ err = hinic_get_port_info(hwdev, &port_info);
+ if (err) {
+ nic_warn(hwdev->dev_hdl, "Failed to get port info\n");
+ } else {
+ link_info->valid = 1;
+ link_info->port_type = port_info.port_type;
+ link_info->autoneg_cap = port_info.autoneg_cap;
+ link_info->autoneg_state = port_info.autoneg_state;
+ link_info->duplex = port_info.duplex;
+ link_info->speed = port_info.speed;
+ hinic_refresh_nic_cfg(hwdev, &port_info);
+ }
+ }
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = link_status->link ?
+ HINIC_EVENT_LINK_UP : HINIC_EVENT_LINK_DOWN;
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ hinic_notify_all_vfs_link_changed(hwdev, link_status->link);
+ ret_link_status = buf_out;
+ ret_link_status->status = 0;
+ *out_size = sizeof(*ret_link_status);
+ }
+}
+
+static void module_status_event(struct hinic_hwdev *hwdev,
+ enum hinic_event_cmd cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_cable_plug_event *plug_event;
+ struct hinic_link_err_event *link_err;
+ struct hinic_event_info event_info = {0};
+
+ event_info.type = HINIC_EVENT_PORT_MODULE_EVENT;
+
+ if (cmd == HINIC_EVENT_CABLE_PLUG) {
+ plug_event = buf_in;
+
+ event_info.module_event.type = plug_event->plugged ?
+ HINIC_PORT_MODULE_CABLE_PLUGGED :
+ HINIC_PORT_MODULE_CABLE_UNPLUGGED;
+
+ *out_size = sizeof(*plug_event);
+ plug_event = buf_out;
+ plug_event->status = 0;
+ } else if (cmd == HINIC_EVENT_LINK_ERR) {
+ link_err = buf_in;
+
+ event_info.module_event.type = HINIC_PORT_MODULE_LINK_ERR;
+ event_info.module_event.err_type = link_err->err_type;
+
+ *out_size = sizeof(*link_err);
+ link_err = buf_out;
+ link_err->status = 0;
+ } else {
+ sdk_warn(hwdev->dev_hdl, "Unknown module event: %d\n", cmd);
+ return;
+ }
+
+ if (!hwdev->event_callback)
+ return;
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+}
+
+void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_event_info event_info = {0};
+
+ sdk_info(hwdev->dev_hdl, "DCB %s, default cos %d, up2cos
%d%d%d%d%d%d%d%d\n",
+ dcb_state->dcb_on ? "on" : "off", dcb_state->default_cos,
+ dcb_state->up_cos[0], dcb_state->up_cos[1],
+ dcb_state->up_cos[2], dcb_state->up_cos[3],
+ dcb_state->up_cos[4], dcb_state->up_cos[5],
+ dcb_state->up_cos[6], dcb_state->up_cos[7]);
+
+ /* Saved in sdk for stateful module */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = HINIC_EVENT_DCB_STATE_CHANGE;
+ memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state));
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+}
+
+static void sw_watchdog_timeout_info_show(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_mgmt_watchdog_info *watchdog_info;
+ u32 *dump_addr, *reg, stack_len, i, j;
+
+ if (in_size != sizeof(*watchdog_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be
%ld.\n",
+ in_size, sizeof(*watchdog_info));
+ return;
+ }
+
+ watchdog_info = buf_in;
+
+ sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp:
0x%x\n",
+ watchdog_info->curr_time_h, watchdog_info->curr_time_l,
+ watchdog_info->task_id, watchdog_info->sp);
+ sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow
flag: 0x%x, top: 0x%x, bottom: 0x%x\n",
+ watchdog_info->curr_used, watchdog_info->peak_used,
+ watchdog_info->is_overflow, watchdog_info->stack_top,
+ watchdog_info->stack_bottom);
+
+ sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n",
+ watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);
+
+ sdk_err(hwdev->dev_hdl, "Mgmt register info\n");
+
+ for (i = 0; i < 3; i++) {
+ reg = watchdog_info->reg + (u64)(u32)(4 * i);
+ sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(reg), *(reg + 1), *(reg + 2), *(reg + 3));
+ }
+
+ sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]);
+
+ if (watchdog_info->stack_actlen <= 1024) {
+ stack_len = watchdog_info->stack_actlen;
+ } else {
+ sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n",
+ watchdog_info->stack_actlen);
+ stack_len = 1024;
+ }
+
+ sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from
sp)\n");
+ for (i = 0; i < (stack_len / 16); i++) {
+ dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
+ sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *dump_addr, *(dump_addr + 1), *(dump_addr + 2),
+ *(dump_addr + 3));
+ }
+
+ for (j = 0; j < ((stack_len % 16) / 4); j++) {
+ dump_addr = (u32 *)(watchdog_info->data +
+ ((u64)(u32)(i * 16 + j * 4)));
+ sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr);
+ }
+
+ *out_size = sizeof(*watchdog_info);
+ watchdog_info = buf_out;
+ watchdog_info->status = 0;
+}
+
+static void mgmt_watchdog_timeout_event_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_fault_info_node *fault_node;
+
+ sw_watchdog_timeout_info_show(hwdev, buf_in, in_size,
+ buf_out, out_size);
+
+ /* refresh history fault info */
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HINIC_FAULT_SRC_MGMT_WATCHDOG;
+ fault_node->info.fault_lev = FAULT_LEVEL_FATAL;
+ hinic_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+
+ queue_work(hwdev->workq, &hwdev->fault_work);
+}
+
+static void mgmt_reset_event_handler(struct hinic_hwdev *hwdev)
+{
+ sdk_info(hwdev->dev_hdl, "Mgmt is reset\n");
+
+ /* mgmt reset only occurs on hot update or mgmt deadloop.
+ * On a deadloop, mgmt reports an event with mod=0, cmd=0x56
+ * and the fault is reported to the OS there, so the mgmt
+ * reset event itself doesn't need to report a fault.
+ */
+}
+
+static void hinic_fmw_act_ntc_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = {0};
+ struct hinic_fmw_act_ntc *notice_info;
+
+ if (in_size != sizeof(*notice_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d,
should be %ld.\n",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ /* mgmt is activated now, restart heartbeat enhanced detection */
+ __set_heartbeat_ehd_detect_delay(hwdev, 0);
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = HINIC_EVENT_FMW_ACT_NTC;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ *out_size = sizeof(*notice_info);
+ notice_info = buf_out;
+ notice_info->status = 0;
+}
+
+static void hinic_pcie_dfx_event_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_pcie_dfx_ntc *notice_info = buf_in;
+ struct hinic_pcie_dfx_info *dfx_info;
+ u16 size = 0;
+ u16 cnt = 0;
+ u32 num = 0;
+ u32 i, j;
+ int err;
+ u32 *reg;
+
+ if (in_size != sizeof(*notice_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d,
should be %ld.\n",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL);
+ if (!dfx_info) {
+ sdk_err(hwdev->dev_hdl, "Malloc dfx_info memory failed\n");
+ return;
+ }
+
+ ((struct hinic_pcie_dfx_ntc *)buf_out)->status = 0;
+ *out_size = sizeof(*notice_info);
+ num = (u32)(notice_info->len / 1024);
+ sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len);
+ sdk_info(hwdev->dev_hdl, "PCIE DFX:\n");
+ dfx_info->host_id = 0;
+ for (i = 0; i < num; i++) {
+ dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE;
+ if (i == (num - 1))
+ dfx_info->last = 1;
+ size = sizeof(*dfx_info);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PCIE_DFX_GET,
+ dfx_info, sizeof(*dfx_info),
+ dfx_info, &size, 0);
+ if (err || dfx_info->status || !size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, dfx_info->status, size);
+ kfree(dfx_info);
+ return;
+ }
+
+ reg = (u32 *)dfx_info->data;
+ for (j = 0; j < 256; j = j + 8) {
+ /*lint -save -e661 -e662*/
+ sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x
0x%08x 0x%08x\n",
+ cnt, reg[j], reg[(u32)(j + 1)],
+ reg[(u32)(j + 2)], reg[(u32)(j + 3)],
+ reg[(u32)(j + 4)], reg[(u32)(j + 5)],
+ reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
+ /*lint -restore*/
+ cnt = cnt + 32;
+ }
+ memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE);
+ }
+ kfree(dfx_info);
+}
+
+struct hinic_mctp_get_host_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 huawei_cmd;
+ u8 sub_cmd;
+ u8 rsvd[2];
+
+ u32 actual_len;
+
+ u8 data[1024];
+};
+
+static void hinic_mctp_get_host_info_event_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = {0};
+ struct hinic_mctp_get_host_info *mctp_out, *mctp_in;
+ struct hinic_mctp_host_info *host_info;
+
+ if (in_size != sizeof(*mctp_in)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be
%ld\n",
+ in_size, sizeof(*mctp_in));
+ return;
+ }
+
+ *out_size = sizeof(*mctp_out);
+ mctp_out = buf_out;
+ mctp_out->status = 0;
+
+ if (!hwdev->event_callback) {
+ mctp_out->status = HINIC_MGMT_STATUS_ERR_INIT;
+ return;
+ }
+
+ mctp_in = buf_in;
+ host_info = &event_info.mctp_info;
+ host_info->major_cmd = mctp_in->huawei_cmd;
+ host_info->sub_cmd = mctp_in->sub_cmd;
+ host_info->data = mctp_out->data;
+
+ event_info.type = HINIC_EVENT_MCTP_GET_HOST_INFO;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ mctp_out->actual_len = host_info->data_len;
+}
+
+char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"};
+
+char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
+ "Unknown", "Fibre", "Electric", "Direct Attach
Copper", "AOC",
+ "Back plane", "BaseT"
+};
+
+static void __print_cable_info(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info)
+{
+ char tmp_str[512] = {0};
+ char tmp_vendor[17] = {0};
+ char *port_type = "Unknown port type";
+ int i;
+
+ if (info->cable_absent) {
+ sdk_info(hwdev->dev_hdl, "Cable unpresent\n");
+ return;
+ }
+
+ if (info->port_type < LINK_PORT_MAX_TYPE)
+ port_type = __hw_to_char_port_type[info->port_type];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n",
+ info->port_type);
+ if (info->port_type == LINK_PORT_FIBRE) {
+ if (info->port_sub_type == FIBRE_SUBTYPE_SR)
+ port_type = "Fibre-SR";
+ else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
+ port_type = "Fibre-LR";
+ }
+
+ for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
+ if (info->vendor_name[i] == ' ')
+ info->vendor_name[i] = '\0';
+ else
+ break;
+ }
+
+ memcpy(tmp_vendor, info->vendor_name,
+ sizeof(info->vendor_name));
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
+ tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
+ info->cable_length, info->cable_max_speed);
+ if (info->port_type != LINK_PORT_COPPER)
+ snprintf(tmp_str, sizeof(tmp_str) - 1,
+ "%s, Temperature: %u", tmp_str,
+ info->cable_temp);
+
+ sdk_info(hwdev->dev_hdl, "Cable information: %s\n",
+ tmp_str);
+}
+
+static void __hi30_lane_info(struct hinic_hwdev *hwdev,
+ struct hilink_lane *lane)
+{
+ struct hi30_ffe_data *ffe_data;
+ struct hi30_ctle_data *ctle_data;
+
+ ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe;
+ ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle;
+
+ sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d;
POST1X=%s%d\n",
+ (ffe_data->PRE1 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE1 & 0xf),
+ (ffe_data->PRE2 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE2 & 0xf),
+ (int)ffe_data->MAIN,
+ (ffe_data->POST1 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST1 & 0xf),
+ (ffe_data->POST2 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST2 & 0xf));
+ sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u;
Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n",
+ ctle_data->ctlebst[0], ctle_data->ctlebst[1],
+ ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
+ ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
+ ctle_data->ctlermband[0], ctle_data->ctlermband[1],
+ ctle_data->ctlermband[2], ctle_data->ctleza[0],
+ ctle_data->ctleza[1], ctle_data->ctleza[2]);
+}
+
+static void __print_hi30_status(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info)
+{
+ struct hilink_lane *lane;
+ int lane_used_num = 0, i;
+
+ for (i = 0; i < HILINK_MAX_LANE; i++) {
+ lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane));
+ if (!lane->lane_used)
+ continue;
+
+ __hi30_lane_info(hwdev, lane);
+ lane_used_num++;
+ }
+
+ /* in new firmware, all lane info is set in lane2 */
+ if (lane_used_num)
+ return;
+
+ /* compatible with old firmware */
+ __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1);
+}
+
+static void __print_link_info(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info,
+ enum hilink_info_print_event type)
+{
+ char *fec = "None";
+
+ if (info->fec < HILINK_FEC_MAX_TYPE)
+ fec = __hw_to_char_fec[info->fec];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n",
+ info->fec);
+
+ if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
+ sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg
%s\n",
+ info->speed, fec, info->an_state ? "on" : "off");
+ } else {
+ sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n",
+ info->an_state ? "on" : "off");
+ }
+}
+
+static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
+ "", "link up", "link down", "cable plugged"
+};
+
+void print_hilink_info(struct hinic_hwdev *hwdev,
+ enum hilink_info_print_event type,
+ struct hinic_link_info *info)
+{
+ __print_cable_info(hwdev, info);
+
+ __print_link_info(hwdev, info, type);
+
+ __print_hi30_status(hwdev, info);
+
+ if (type == HILINK_EVENT_LINK_UP)
+ return;
+
+ if (type == HILINK_EVENT_CABLE_PLUGGED) {
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n",
+ info->alos, info->rx_los);
+ return;
+ }
+
+ sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info
reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n",
+ info->pma_status == 1 ? "off" : "on",
+ info->mac_tx_en ? "enable" : "disable",
+ info->mac_rx_en ? "enable" : "disable",
info->pma_dbg_info_reg,
+ info->pma_signal_ok_reg, info->rf_lf_status_reg);
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS
link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n",
+ info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
+ info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
+}
+
+static int hinic_print_hilink_info(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hilink_link_info *hilink_info = buf_in;
+ struct hinic_link_info *info;
+ enum hilink_info_print_event type;
+
+ if (in_size != sizeof(*hilink_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be
%ld\n",
+ in_size, sizeof(*hilink_info));
+ return -EINVAL;
+ }
+
+ ((struct hinic_hilink_link_info *)buf_out)->status = 0;
+ *out_size = sizeof(*hilink_info);
+
+ info = &hilink_info->info;
+ type = hilink_info->info_type;
+
+ if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
+ sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n",
+ hilink_info_report_type[type]);
+
+ print_hilink_info(hwdev, type, info);
+
+ return 0;
+}
+
+int hinic_hilink_info_show(struct hinic_hwdev *hwdev)
+{
+ struct hinic_link_info hilink_info = { {0} };
+ int err;
+
+ err = hinic_get_hilink_link_info(hwdev, &hilink_info);
+ if (err) {
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+ sdk_info(hwdev->dev_hdl, "Unsupport to get hilink info\n");
+ return err;
+ }
+
+ if (hilink_info.cable_absent) {
+ sdk_info(hwdev->dev_hdl, "Cable unpresent\n");
+ return 0;
+ }
+
+ sdk_info(hwdev->dev_hdl, "Current state of hilink info:\n");
+ print_hilink_info(hwdev, HILINK_EVENT_MAX_TYPE, &hilink_info);
+
+ return 0;
+}
+
+static void mgmt_heartbeat_enhanced_event(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_heartbeat_event *hb_event = buf_in;
+ struct hinic_heartbeat_event *hb_event_out = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+
+ if (in_size != sizeof(*hb_event)) {
+ sdk_err(dev->dev_hdl, "Invalid data size from mgmt for heartbeat event:
%d\n",
+ in_size);
+ return;
+ }
+
+ if (dev->heartbeat_ehd.last_heartbeat != hb_event->heart) {
+ dev->heartbeat_ehd.last_update_jiffies = jiffies;
+ dev->heartbeat_ehd.last_heartbeat = hb_event->heart;
+ }
+
+ hb_event_out->drv_heart = HEARTBEAT_DRV_MAGIC_ACK;
+
+ hb_event_out->status = 0;
+ *out_size = sizeof(*hb_event_out);
+}
+
+/* public process for this event:
+ * pf link change event
+ * pf heart lost event, TBD
+ * pf fault report event
+ * vf link change event
+ * vf heart lost event, TBD
+ * vf fault report event, TBD
+ */
+static void _event_handler(struct hinic_hwdev *hwdev, enum hinic_event_cmd cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_vf_dcb_state *vf_dcb;
+
+ if (!hwdev)
+ return;
+
+ *out_size = 0;
+
+ switch (cmd) {
+ case HINIC_EVENT_LINK_STATUS_CHANGE:
+ link_status_event_handler(hwdev, buf_in, in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_EVENT_CABLE_PLUG:
+ case HINIC_EVENT_LINK_ERR:
+ module_status_event(hwdev, cmd, buf_in, in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_EVENT_HILINK_INFO:
+ hinic_print_hilink_info(hwdev, buf_in, in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_EVENT_MGMT_FAULT:
+ fault_event_handler(hwdev, buf_in, in_size, buf_out, out_size);
+ break;
+
+ case HINIC_EVENT_HEARTBEAT_LOST:
+ heartbeat_lost_event_handler(hwdev);
+ break;
+
+ case HINIC_EVENT_SET_VF_COS:
+ vf_dcb = buf_in;
+ if (!vf_dcb)
+ break;
+
+ hinic_notify_dcb_state_event(hwdev, &vf_dcb->state);
+
+ break;
+
+ case HINIC_EVENT_MGMT_WATCHDOG:
+ mgmt_watchdog_timeout_event_handler(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_EVENT_MGMT_RESET:
+ mgmt_reset_event_handler(hwdev);
+ break;
+
+ case HINIC_EVENT_MGMT_FMW_ACT_NTC:
+ hinic_fmw_act_ntc_handler(hwdev, buf_in, in_size, buf_out,
+ out_size);
+
+ break;
+
+ case HINIC_EVENT_MGMT_PCIE_DFX:
+ hinic_pcie_dfx_event_handler(hwdev, buf_in, in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_EVENT_MCTP_HOST_INFO:
+ hinic_mctp_get_host_info_event_handler(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_EVENT_MGMT_HEARTBEAT_EHD:
+ mgmt_heartbeat_enhanced_event(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ default:
+ sdk_warn(hwdev->dev_hdl, "Unsupported event %d to process\n",
+ cmd);
+ break;
+ }
+}
+
+/* vf link change event
+ * vf fault report event, TBD
+ */
+static int vf_nic_event_handler(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport L2NIC event: cmd %d\n", cmd);
+ *out_size = 0;
+ return -EINVAL;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+ return 0;
+}
+
+static int vf_comm_event_handler(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_COMM, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport COMM event: cmd %d\n", cmd);
+ *out_size = 0;
+ return -EFAULT;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+ return 0;
+}
+
+/* pf link change event */
+static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport L2NIC event: cmd %d\n", cmd);
+ *out_size = 0;
+ return;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_HILINK, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport HILINK event: cmd %d\n", cmd);
+ *out_size = 0;
+ return;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+/* pf fault report event */
+void pf_fault_event_handler(void *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_FAULT, buf_in,
+ in_size, buf_out, out_size);
+}
+
+void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_WATCHDOG, buf_in,
+ in_size, buf_out, out_size);
+}
+
+void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_FMW_ACT_NTC, buf_in,
+ in_size, buf_out, out_size);
+}
+
+void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_PCIE_DFX, buf_in,
+ in_size, buf_out, out_size);
+}
+
+void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MCTP_HOST_INFO, buf_in,
+ in_size, buf_out, out_size);
+}
+
+void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_HEARTBEAT_EHD, buf_in,
+ in_size, buf_out, out_size);
+}
+
+static void pf_event_register(struct hinic_hwdev *hwdev)
+{
+ if (hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+ hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC,
+ hwdev, pf_nic_event_handler);
+ hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK,
+ hwdev,
+ pf_hilink_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_FAULT_REPORT,
+ pf_fault_event_handler);
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_WATCHDOG_INFO,
+ mgmt_watchdog_event_handler);
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_FMW_ACT_NTC,
+ mgmt_fmw_act_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC,
+ mgmt_pcie_dfx_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_GET_HOST_INFO,
+ mgmt_get_mctp_event_handler);
+ }
+}
+
+void hinic_event_register(void *dev, void *pri_handle,
+ hinic_event_handler callback)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for register event\n");
+ return;
+ }
+
+ hwdev->event_callback = callback;
+ hwdev->event_pri_handle = pri_handle;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ pf_event_register(hwdev);
+ } else {
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ vf_nic_event_handler);
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ vf_comm_event_handler);
+ }
+}
+
+void hinic_event_unregister(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ hwdev->event_callback = NULL;
+ hwdev->event_pri_handle = NULL;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC);
+ hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_FAULT_REPORT);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_WATCHDOG_INFO);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_FMW_ACT_NTC);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_GET_HOST_INFO);
+ } else {
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ }
+}
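+
+/* Registration sketch for an upper-layer driver (the callback body and the
+ * my_nic type are hypothetical):
+ *
+ * static void nic_event(void *pri, struct hinic_event_info *event)
+ * {
+ * struct my_nic *nic = pri;
+ *
+ * if (event->type == HINIC_EVENT_LINK_UP)
+ * netif_carrier_on(nic->netdev);
+ * }
+ *
+ * hinic_event_register(hwdev, nic, nic_event);
+ * ...
+ * hinic_event_unregister(hwdev);
+ *
+ * Only one callback is kept per hwdev; a second registration simply
+ * overwrites event_callback and event_pri_handle.
+ */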
+
+/* 0 - heartbeat lost, 1 - normal */
+static u8 hinic_get_heartbeat_status(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u32 attr1;
+
+ /* surprise remove should be set to 1 */
+ if (!hinic_get_chip_present_flag(hwdev))
+ return 1;
+
+ attr1 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n");
+ hinic_set_chip_absent(hwdev);
+ hinic_force_complete_all(hwdev);
+ /* should notify chiperr to pangea
+ * when detecting pcie link down
+ */
+ return 1;
+ }
+
+ return HINIC_AF1_GET(attr1, MGMT_INIT_STATUS);
+}
+
+static void hinic_heartbeat_event_handler(struct work_struct *work)
+{
+ struct hinic_hwdev *hwdev =
+ container_of(work, struct hinic_hwdev, timer_work);
+ u16 out = 0;
+
+ _event_handler(hwdev, HINIC_EVENT_HEARTBEAT_LOST,
+ NULL, 0, &out, &out);
+}
+
+static bool __detect_heartbeat_ehd_lost(struct hinic_hwdev *hwdev)
+{
+ struct hinic_heartbeat_enhanced *hb_ehd = &hwdev->heartbeat_ehd;
+ u64 update_time;
+ bool hb_ehd_lost = false;
+
+ if (!hb_ehd->en)
+ return false;
+
+ if (time_after(jiffies, hb_ehd->start_detect_jiffies)) {
+ update_time = jiffies_to_msecs(jiffies -
+ hb_ehd->last_update_jiffies);
+ if (update_time > HINIC_HEARBEAT_ENHANCED_LOST) {
+ sdk_warn(hwdev->dev_hdl, "Heartbeat enhanced lost for %d millisecond\n",
+ (u32)update_time);
+ hb_ehd_lost = true;
+ }
+ } else {
+ /* mgmt may not report heartbeat enhanced event and won't
+ * update last_update_jiffies
+ */
+ hb_ehd->last_update_jiffies = jiffies;
+ }
+
+ return hb_ehd_lost;
+}
+
+#ifdef HAVE_TIMER_SETUP
+static void hinic_heartbeat_timer_handler(struct timer_list *t)
+#else
+static void hinic_heartbeat_timer_handler(unsigned long data)
+#endif
+{
+#ifdef HAVE_TIMER_SETUP
+ struct hinic_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer);
+#else
+ struct hinic_hwdev *hwdev = (struct hinic_hwdev *)data;
+#endif
+
+ if (__detect_heartbeat_ehd_lost(hwdev) ||
+ !hinic_get_heartbeat_status(hwdev)) {
+ hwdev->heartbeat_lost = 1;
+ stop_timer(&hwdev->heartbeat_timer);
+ queue_work(hwdev->workq, &hwdev->timer_work);
+ } else {
+ mod_timer(&hwdev->heartbeat_timer,
+ jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_PERIOD));
+ }
+}
+
+void hinic_init_heartbeat(struct hinic_hwdev *hwdev)
+{
+#ifdef HAVE_TIMER_SETUP
+ timer_setup(&hwdev->heartbeat_timer, hinic_heartbeat_timer_handler, 0);
+#else
+ initialize_timer(hwdev->adapter_hdl, &hwdev->heartbeat_timer);
+ hwdev->heartbeat_timer.data = (unsigned long)hwdev;
+ hwdev->heartbeat_timer.function = hinic_heartbeat_timer_handler;
+#endif
+ hwdev->heartbeat_timer.expires =
+ jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_START_EXPIRE);
+
+ add_to_timer(&hwdev->heartbeat_timer, HINIC_HEARTBEAT_PERIOD);
+
+ INIT_WORK(&hwdev->timer_work, hinic_heartbeat_event_handler);
+}
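+
+/* The heartbeat timer is self-rearming: hinic_heartbeat_timer_handler()
+ * calls mod_timer() every HINIC_HEARTBEAT_PERIOD (1s) while the device is
+ * healthy, stops itself once a loss is detected, and defers the actual
+ * handling to hwdev->workq because timer context is atomic. The first
+ * expiry is delayed by HINIC_HEARTBEAT_START_EXPIRE (5s) to give the
+ * firmware time to come up.
+ */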
+
+void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev)
+{
+ destroy_work(&hwdev->timer_work);
+ stop_timer(&hwdev->heartbeat_timer);
+ delete_timer(&hwdev->heartbeat_timer);
+}
+
+u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data)
+{
+ struct hinic_hwdev *hwdev = (struct hinic_hwdev *)handle;
+ u8 event_level = FAULT_LEVEL_MAX;
+
+ switch (event) {
+ case HINIC_INTERNAL_TSO_FATAL_ERROR:
+ case HINIC_INTERNAL_LRO_FATAL_ERROR:
+ case HINIC_INTERNAL_TX_FATAL_ERROR:
+ case HINIC_INTERNAL_RX_FATAL_ERROR:
+ case HINIC_INTERNAL_OTHER_FATAL_ERROR:
+ atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]);
+ sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n",
+ event, data);
+ event_level = FAULT_LEVEL_FATAL;
+ break;
+ default:
+ sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process.\n",
+ event);
+ }
+
+ return event_level;
+}
+
+struct hinic_fast_recycled_mode {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 fast_recycled_mode; /* 1: enable fast recycle, available
+ * in dpdk mode,
+ * 0: normal mode, available in kernel
+ * nic mode
+ */
+ u8 rsvd1;
+};
+
+int hinic_enable_fast_recycle(void *hwdev, bool enable)
+{
+ struct hinic_fast_recycled_mode fast_recycled_mode = {0};
+ u16 out_size = sizeof(fast_recycled_mode);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ fast_recycled_mode.func_id = hinic_global_func_id(hwdev);
+ fast_recycled_mode.fast_recycled_mode = enable ? 1 : 0;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET,
+ &fast_recycled_mode,
+ sizeof(fast_recycled_mode),
+ &fast_recycled_mode, &out_size, 0);
+ if (err || fast_recycled_mode.status || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set recycle mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fast_recycled_mode.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void hinic_set_pcie_order_cfg(void *handle)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_GLB_DMA_SO_RO_REPLACE_ADDR);
+
+ if (HINIC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) {
+ val = HINIC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG);
+ val |= HINIC_GLB_DMA_SO_R0_SET(HINIC_DISABLE_ORDER, SO_RO_CFG);
+ hinic_hwif_write_reg(hwdev->hwif,
+ HINIC_GLB_DMA_SO_RO_REPLACE_ADDR, val);
+ }
+}
+
+int _set_led_status(struct hinic_hwdev *hwdev, u8 port,
+ enum hinic_led_type type,
+ enum hinic_led_mode mode, u8 reset)
+{
+ struct hinic_led_info led_info = {0};
+ u16 out_size = sizeof(led_info);
+ int err;
+
+ led_info.port = port;
+ led_info.reset = reset;
+
+ led_info.type = type;
+ led_info.mode = mode;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_LED_STATUS,
+ &led_info, sizeof(led_info),
+ &led_info, &out_size, 0);
+ if (err || led_info.status || !out_size) {
+ sdk_err(hwdev->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out
size: 0x%x\n",
+ err, led_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type,
+ enum hinic_led_mode mode)
+{
+ int err;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ err = _set_led_status(hwdev, port, type, mode, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic_reset_led_status(void *hwdev, u8 port)
+{
+ int err;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID,
+ HINIC_LED_MODE_INVALID, 1);
+ if (err) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to reset led status\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
+{
+ struct hinic_comm_board_info board_info = {0};
+ u16 out_size = sizeof(board_info);
+ int err;
+
+ if (!hwdev || !info)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_BOARD_INFO,
+ &board_info, sizeof(board_info),
+ &board_info, &out_size, 0);
+ if (err || board_info.status || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, board_info.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(info, &board_info.info, sizeof(*info));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_board_info);
+
+int hinic_get_phy_init_status(void *hwdev,
+ enum phy_init_status_type *init_status)
+{
+ struct hinic_phy_init_status phy_info = {0};
+ u16 out_size = sizeof(phy_info);
+ int err;
+
+ if (!hwdev || !init_status)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_PHY_INIT_STATUS,
+ &phy_info, sizeof(phy_info),
+ &phy_info, &out_size, 0);
+ if ((phy_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ phy_info.status) || err || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, phy_info.status, out_size);
+ return -EFAULT;
+ }
+
+ *init_status = phy_info.init_status;
+
+ return phy_info.status;
+}
+
+int hinic_phy_init_status_judge(void *hwdev)
+{
+ enum phy_init_status_type init_status;
+ int ret;
+ unsigned long end;
+
+ /* It's not a phy, so don't judge phy status */
+ if (!HINIC_BOARD_IS_PHY((struct hinic_hwdev *)hwdev))
+ return 0;
+
+ end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT);
+ do {
+ ret = hinic_get_phy_init_status(hwdev, &init_status);
+ if (ret == HINIC_MGMT_CMD_UNSUPPORTED)
+ return 0;
+ else if (ret)
+ return -EFAULT;
+
+ switch (init_status) {
+ case PHY_INIT_SUCCESS:
+ sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is success\n");
+ return 0;
+ case PHY_NONSUPPORT:
+ sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is nonsupport\n");
+ return 0;
+ case PHY_INIT_FAIL:
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is failed\n");
+ return -EIO;
+ case PHY_INIT_DOING:
+ msleep(250);
+ break;
+ default:
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is invalid, init_status: %d\n",
+ init_status);
+ return -EINVAL;
+ }
+ } while (time_before(jiffies, end));
+
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is timeout\n");
+
+ return -ETIMEDOUT;
+}
+
+static void hinic_set_mgmt_channel_status(void *handle, bool state)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev || hinic_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return;
+
+ val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+ val = HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+ val |= HINIC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR, val);
+}
+
+int hinic_get_mgmt_channel_status(void *handle)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return true;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return false;
+
+ val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+
+ return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+}
+
+static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_update_active *active_info = buf_out;
+
+ if (!active_info || hinic_func_type(hwdev) == TYPE_VF ||
+ !(dev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return;
+
+ if (!active_info->status &&
+ (active_info->update_status & HINIC_ACTIVE_STATUS_MASK)) {
+ active_info->update_status &= HINIC_ACTIVE_STATUS_CLEAR;
+ return;
+ }
+
+ hinic_set_mgmt_channel_status(hwdev, false);
+}
+
+int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_bios_cfg_cmd cfg = {0};
+ u16 out_size = sizeof(cfg);
+ int err;
+
+ if (!hwdev || !pf_bw_limit)
+ return -EINVAL;
+
+ if (HINIC_FUNC_TYPE(dev) == TYPE_VF ||
+ !FUNC_SUPPORT_RATE_LIMIT(hwdev))
+ return 0;
+
+ cfg.func_valid = 1;
+ cfg.func_idx = (u8)hinic_global_func_id(hwdev);
+
+ cfg.op_code = HINIC_BIOS_CFG_GET | HINIC_BIOS_CFG_PF_BW_LIMIT;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT,
+ &cfg, sizeof(cfg),
+ &cfg, &out_size, 0);
+ if (err || cfg.status || !out_size) {
+ sdk_err(dev->dev_hdl, "Failed to get bios pf bandwidth limit, err: %d, status:
0x%x, out size: 0x%x\n",
+ err, cfg.status, out_size);
+ return -EIO;
+ }
+
+ /* Check whether the data is valid */
+ if (cfg.signature != 0x19e51822) {
+ sdk_err(dev->dev_hdl, "Invalid bios configureration data, signature:
0x%x\n",
+ cfg.signature);
+ return -EINVAL;
+ }
+
+ if (cfg.pf_bw_limit > 100) {
+ sdk_err(dev->dev_hdl, "Invalid bios cfg pf bandwidth limit: %d\n",
+ cfg.pf_bw_limit);
+ return -EINVAL;
+ }
+
+ *pf_bw_limit = cfg.pf_bw_limit;
+
+ return 0;
+}
+
+bool hinic_get_ppf_status(void *hwdev)
+{
+ struct hinic_ppf_state ppf_state = {0};
+ struct hinic_hwdev *dev = hwdev;
+ struct card_node *chip_node;
+ u16 out_size = sizeof(ppf_state);
+ int err;
+
+ if (!hwdev)
+ return false;
+
+ chip_node = (struct card_node *)dev->chip_node;
+
+ if (!HINIC_IS_VF(dev))
+ return chip_node->ppf_state;
+
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_PPF_STATE,
+ &ppf_state, sizeof(ppf_state),
+ &ppf_state, &out_size, 0);
+ if (err || ppf_state.status || !out_size) {
+ sdk_err(dev->dev_hdl, "Failed to get ppf state, err: %d, status: 0x%x, out
size: 0x%x\n",
+ err, ppf_state.status, out_size);
+ return false;
+ }
+
+ return (bool)ppf_state.ppf_state;
+}
+
+#define HINIC_RED_REG_TIME_OUT 3000
+
+int hinic_read_reg(void *hwdev, u32 reg_addr, u32 *val)
+{
+ struct hinic_reg_info reg_info = {0};
+ u16 out_size = sizeof(reg_info);
+ int err;
+
+ if (!hwdev || !val)
+ return -EINVAL;
+
+ reg_info.reg_addr = reg_addr;
+ reg_info.val_length = sizeof(u32);
+
+ err = hinic_pf_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_REG_READ,
+ &reg_info, sizeof(reg_info),
+ &reg_info, &out_size,
+ HINIC_RED_REG_TIME_OUT);
+ if (reg_info.status || err || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to read reg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, reg_info.status, out_size);
+ return -EFAULT;
+ }
+
+ *val = reg_info.data[0];
+
+ return 0;
+}
+
+static void hinic_exec_recover_cb(struct hinic_hwdev *hwdev,
+ struct hinic_fault_recover_info *info)
+{
+ if (!hinic_get_chip_present_flag(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Device surprised removed, abort recover\n");
+ return;
+ }
+
+ if (info->fault_lev >= FAULT_LEVEL_MAX) {
+ sdk_err(hwdev->dev_hdl, "Invalid fault level\n");
+ return;
+ }
+
+ down(&hwdev->recover_sem);
+ if (hwdev->recover_cb) {
+ if (info->fault_lev <= FAULT_LEVEL_SERIOUS_FLR)
+ hinic_set_fast_recycle_status(hwdev);
+
+ hwdev->recover_cb(hwdev->recover_pri_hd, *info);
+ }
+ up(&hwdev->recover_sem);
+}
+
+void hinic_fault_work_handler(struct work_struct *work)
+{
+ struct hinic_hwdev *hwdev =
+ container_of(work, struct hinic_hwdev, fault_work);
+
+ down(&hwdev->fault_list_sem);
+ up(&hwdev->fault_list_sem);
+}
+
+void hinic_swe_fault_handler(struct hinic_hwdev *hwdev, u8 level,
+ u8 event, u64 val)
+{
+ struct hinic_fault_info_node *fault_node;
+
+ if (level < FAULT_LEVEL_MAX) {
+ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL);
+ if (!fault_node) {
+ sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n");
+ return;
+ }
+
+ fault_node->info.fault_src = HINIC_FAULT_SRC_SW_MGMT_UCODE;
+ fault_node->info.fault_lev = level;
+ fault_node->info.fault_data.sw_mgmt.event_id = event;
+ fault_node->info.fault_data.sw_mgmt.event_data = val;
+ hinic_refresh_history_fault(hwdev, &fault_node->info);
+
+ down(&hwdev->fault_list_sem);
+ kfree(fault_node);
+ up(&hwdev->fault_list_sem);
+
+ queue_work(hwdev->workq, &hwdev->fault_work);
+ }
+}
+
+int hinic_register_fault_recover(void *hwdev, void *pri_handle,
+ hinic_fault_recover_handler cb)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev || !pri_handle || !cb) {
+ pr_err("Invalid input parameters when register fault recover handler\n");
+ return -EINVAL;
+ }
+
+ down(&dev->recover_sem);
+ dev->recover_pri_hd = pri_handle;
+ dev->recover_cb = cb;
+ up(&dev->recover_sem);
+
+ if (dev->history_fault_flag)
+ hinic_exec_recover_cb(dev, &dev->history_fault);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_register_fault_recover);
+
+int hinic_unregister_fault_recover(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev) {
+ pr_err("Invalid input parameters when unregister fault recover handler\n");
+ return -EINVAL;
+ }
+
+ down(&dev->recover_sem);
+ dev->recover_pri_hd = NULL;
+ dev->recover_cb = NULL;
+ up(&dev->recover_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_unregister_fault_recover);
+
+void hinic_set_func_deinit_flag(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ set_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state);
+}
+
+int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos)
+{
+ struct hinic_hw_pf_infos_cmd pf_infos = {0};
+ u16 out_size = sizeof(pf_infos);
+ int err;
+
+ if (!hwdev || !infos)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_HW_PF_INFOS,
+ &pf_infos, sizeof(pf_infos),
+ &pf_infos, &out_size, 0);
+ if ((pf_infos.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ pf_infos.status) || err || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pf_infos.status, out_size);
+ return -EFAULT;
+ }
+
+ if (!pf_infos.status)
+ memcpy(infos, &pf_infos.infos, sizeof(*infos));
+
+ return pf_infos.status;
+}
+EXPORT_SYMBOL(hinic_get_hw_pf_infos);
+
+int hinic_set_ip_check(void *hwdev, bool ip_check_ctl)
+{
+ u32 val = 0;
+ int ret;
+ int i;
+
+ if (!hwdev || hinic_func_type(hwdev) == TYPE_VF)
+ return -EINVAL;
+
+ for (i = 0; i <= HINIC_IPSU_CHANNEL_NUM; i++) {
+ ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+ (HINIC_IPSU_CHANNEL0_ADDR +
+ i * HINIC_IPSU_CHANNEL_OFFSET), &val);
+ if (ret)
+ return ret;
+
+ val = be32_to_cpu(val);
+ if (ip_check_ctl)
+ val |= HINIC_IPSU_DIP_SIP_MASK;
+ else
+ val &= (~HINIC_IPSU_DIP_SIP_MASK);
+
+ val = cpu_to_be32(val);
+ ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+ (HINIC_IPSU_CHANNEL0_ADDR +
+ i * HINIC_IPSU_CHANNEL_OFFSET), val);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int hinic_get_card_present_state(void *hwdev, bool *card_present_state)
+{
+ u32 addr, attr1;
+
+ if (!hwdev || !card_present_state)
+ return -EINVAL;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr);
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "Card is not
present\n");
+ *card_present_state = false;
+ } else {
+ *card_present_state = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_card_present_state);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
new file mode 100644
index 000000000000..ddec645d493a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
@@ -0,0 +1,377 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HWDEV_H_
+#define HINIC_HWDEV_H_
+
+#include "hinic_port_cmd.h"
+
+/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */
+#define HINIC_DEFAULT_WQ_PAGE_SIZE 0x40000
+#define HINIC_HW_WQ_PAGE_SIZE 0x1000
+
+#define HINIC_MSG_TO_MGMT_MAX_LEN 2016
+
+struct cfg_mgmt_info;
+struct rdma_comp_resource;
+
+struct hinic_hwif;
+struct hinic_nic_io;
+struct hinic_wqs;
+struct hinic_aeqs;
+struct hinic_ceqs;
+struct hinic_mbox_func_to_func;
+struct hinic_msg_pf_to_mgmt;
+struct hinic_cmdqs;
+struct hinic_multi_host_mgmt;
+
+struct hinic_root_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+struct hinic_page_addr {
+ void *virt_addr;
+ u64 phys_addr;
+};
+
+struct mqm_addr_trans_tbl_info {
+ u32 chunk_num;
+ u32 search_gpa_num;
+ u32 page_size;
+ u32 page_num;
+ struct hinic_page_addr *brm_srch_page_addr;
+};
+
+#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF
+
+#define HINIC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000)
+#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE
+
+#define HINIC_HW_WQ_NAME "hinic_hardware"
+#define HINIC_HEARTBEAT_PERIOD 1000
+#define HINIC_HEARTBEAT_START_EXPIRE 5000
+
+#define HINIC_CHIP_ERROR_TYPE_MAX 1024
+#define HINIC_CHIP_FAULT_SIZE \
+ (HINIC_NODE_ID_MAX * FAULT_LEVEL_MAX * HINIC_CHIP_ERROR_TYPE_MAX)
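+
+/* The size above implies a flat [node_id][err_level][err_type] layout for
+ * hwdev->chip_fault_stats, matching the index computed in
+ * fault_report_show():
+ *
+ * pos = node_id * FAULT_LEVEL_MAX * HINIC_CHIP_ERROR_TYPE_MAX +
+ * level * HINIC_CHIP_ERROR_TYPE_MAX + err_type;
+ */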
+
+enum hinic_node_id {
+ HINIC_NODE_ID_IPSU = 4,
+ HINIC_NODE_ID_MGMT_HOST = 21, /* Host CPU sends API to uP */
+ HINIC_NODE_ID_MAX = 22
+};
+
+#define HINIC_HWDEV_INIT_MODES_MASK ((1 << HINIC_HWDEV_ALL_INITED) - 1)
+
+enum hinic_hwdev_func_state {
+ HINIC_HWDEV_FUNC_INITED = HINIC_HWDEV_ALL_INITED,
+
+ HINIC_HWDEV_FUNC_DEINIT,
+
+ HINIC_HWDEV_STATE_BUSY = 31,
+};
+
+struct hinic_cqm_stats {
+ atomic_t cqm_cmd_alloc_cnt;
+ atomic_t cqm_cmd_free_cnt;
+ atomic_t cqm_send_cmd_box_cnt;
+ atomic_t cqm_send_cmd_imm_cnt;
+ atomic_t cqm_db_addr_alloc_cnt;
+ atomic_t cqm_db_addr_free_cnt;
+
+ atomic_t cqm_fc_srq_create_cnt;
+ atomic_t cqm_srq_create_cnt;
+ atomic_t cqm_rq_create_cnt;
+
+ atomic_t cqm_qpc_mpt_create_cnt;
+ atomic_t cqm_nonrdma_queue_create_cnt;
+ atomic_t cqm_rdma_queue_create_cnt;
+ atomic_t cqm_rdma_table_create_cnt;
+
+ atomic_t cqm_qpc_mpt_delete_cnt;
+ atomic_t cqm_nonrdma_queue_delete_cnt;
+ atomic_t cqm_rdma_queue_delete_cnt;
+ atomic_t cqm_rdma_table_delete_cnt;
+
+ atomic_t cqm_func_timer_clear_cnt;
+ atomic_t cqm_func_hash_buf_clear_cnt;
+
+ atomic_t cqm_scq_callback_cnt;
+ atomic_t cqm_ecq_callback_cnt;
+ atomic_t cqm_nocq_callback_cnt;
+ atomic_t cqm_aeq_callback_cnt[112];
+};
+
+struct hinic_link_event_stats {
+ atomic_t link_down_stats;
+ atomic_t link_up_stats;
+};
+
+struct hinic_fault_event_stats {
+ atomic_t chip_fault_stats[HINIC_NODE_ID_MAX][FAULT_LEVEL_MAX];
+ atomic_t fault_type_stat[FAULT_TYPE_MAX];
+ atomic_t pcie_fault_stats;
+};
+
+struct hinic_hw_stats {
+ atomic_t heart_lost_stats;
+ atomic_t nic_ucode_event_stats[HINIC_NIC_FATAL_ERROR_MAX];
+ struct hinic_cqm_stats cqm_stats;
+ struct hinic_link_event_stats link_event_stats;
+ struct hinic_fault_event_stats fault_event_stats;
+};
+
+struct hinic_fault_info_node {
+ struct list_head list;
+ struct hinic_hwdev *hwdev;
+ struct hinic_fault_recover_info info;
+};
+
+enum heartbeat_support_state {
+ HEARTBEAT_NOT_SUPPORT = 0,
+ HEARTBEAT_SUPPORT,
+};
+
+/* 25s allows for at most 5 lost heartbeat events */
+#define HINIC_HEARBEAT_ENHANCED_LOST 25000
+struct hinic_heartbeat_enhanced {
+ bool en; /* enable enhanced heartbeat or not */
+
+ unsigned long last_update_jiffies;
+ u32 last_heartbeat;
+
+ unsigned long start_detect_jiffies;
+};
+
+#define HINIC_NORMAL_HOST_CAP (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_RATE_LIMIT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+#define HINIC_MULTI_BM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+#define HINIC_MULTI_BM_SLAVE (HINIC_FUNC_SRIOV_EN_DFLT | \
+ HINIC_FUNC_SRIOV_NUM_FIX | \
+ HINIC_FUNC_FORCE_LINK_UP | \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP)
+#define HINIC_MULTI_VM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+#define HINIC_MULTI_VM_SLAVE (HINIC_FUNC_MGMT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SRIOV_EN_DFLT | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+
+#define MULTI_HOST_CHIP_MODE_SHIFT 0
+#define MULTI_HOST_MASTER_MBX_STS_SHIFT 0x4
+#define MULTI_HOST_PRIV_DATA_SHIFT 0x8
+
+#define MULTI_HOST_CHIP_MODE_MASK 0xF
+#define MULTI_HOST_MASTER_MBX_STS_MASK 0xF
+#define MULTI_HOST_PRIV_DATA_MASK 0xFFFF
+
+#define MULTI_HOST_REG_SET(val, member) \
+ (((val) & MULTI_HOST_##member##_MASK) \
+ << MULTI_HOST_##member##_SHIFT)
+#define MULTI_HOST_REG_GET(val, member) \
+ (((val) >> MULTI_HOST_##member##_SHIFT) \
+ & MULTI_HOST_##member##_MASK)
+#define MULTI_HOST_REG_CLEAR(val, member) \
+ ((val) & (~(MULTI_HOST_##member##_MASK \
+ << MULTI_HOST_##member##_SHIFT)))
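A hedged aside on how these SET/GET/CLEAR helpers compose; the register value
and the chosen mode below are illustrative only (CHIP_MODE_BMGW comes from the
enum defined further down in this header):

	/* Sketch: rewrite the CHIP_MODE field of a cached register value */
	u32 reg = 0;

	reg = MULTI_HOST_REG_CLEAR(reg, CHIP_MODE);		/* zero the 4-bit field */
	reg |= MULTI_HOST_REG_SET(CHIP_MODE_BMGW, CHIP_MODE);	/* mask, then shift in */

	if (MULTI_HOST_REG_GET(reg, CHIP_MODE) == CHIP_MODE_BMGW)
		;	/* the same field reads back */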
+
+#define HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE 12
+
+enum hinic_chip_mode {
+ CHIP_MODE_NORMAL,
+ CHIP_MODE_BMGW,
+ CHIP_MODE_VMGW,
+};
+
+/* New version: RoCE QP depth is not limited to a power of 2 */
+#define HINIC_CMD_VER_ROCE_QP 1
+/* New version: adds the function id in multi-host mode */
+#define HINIC_CMD_VER_FUNC_ID 2
+
+struct hinic_hwdev {
+ void *adapter_hdl; /* pointer to hinic_pcidev or NDIS_Adapter */
+ void *pcidev_hdl; /* pointer to pcidev or Handler */
+ void *dev_hdl; /* pointer to pcidev->dev or Handler, for
+ * sdk_err() or dma_alloc()
+ */
+ u32 wq_page_size;
+
+ void *cqm_hdl;
+ void *chip_node;
+
+ struct hinic_hwif *hwif; /* include void __iomem *bar */
+ struct hinic_nic_io *nic_io;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct rdma_comp_resource *rdma_comp_res;
+ struct hinic_wqs *wqs; /* for FC slq */
+ struct mqm_addr_trans_tbl_info mqm_att;
+
+ struct hinic_aeqs *aeqs;
+ struct hinic_ceqs *ceqs;
+
+ struct hinic_mbox_func_to_func *func_to_func;
+
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ struct hinic_cmdqs *cmdqs;
+
+ struct hinic_page_addr page_pa0;
+ struct hinic_page_addr page_pa1;
+
+ hinic_event_handler event_callback;
+ void *event_pri_handle;
+
+ struct semaphore recover_sem;
+ bool collect_log_flag;
+ bool history_fault_flag;
+ struct hinic_fault_recover_info history_fault;
+ void *recover_pri_hd;
+ hinic_fault_recover_handler recover_cb;
+
+ struct work_struct fault_work;
+ struct semaphore fault_list_sem;
+
+ struct work_struct timer_work;
+ struct workqueue_struct *workq;
+ struct timer_list heartbeat_timer;
+	/* true means heartbeat lost, false means heartbeat restored */
+ u32 heartbeat_lost;
+ int chip_present_flag;
+ struct hinic_heartbeat_enhanced heartbeat_ehd;
+ struct hinic_hw_stats hw_stats;
+ u8 *chip_fault_stats;
+
+ u32 statufull_ref_cnt;
+ ulong func_state;
+
+ u64 feature_cap; /* enum hinic_func_cap */
+ enum hinic_func_mode func_mode;
+
+ struct hinic_multi_host_mgmt *mhost_mgmt;
+
+	/* On the bmgw x86 host, the driver can't send messages to the mgmt cpu
+	 * directly; it must transmit them through the ppf mbox to the bmgw arm
+	 * host.
+	 */
+ struct semaphore ppf_sem;
+ void *ppf_hwdev;
+
+ struct hinic_board_info board_info;
+#define MGMT_VERSION_MAX_LEN 32
+ u8 mgmt_ver[MGMT_VERSION_MAX_LEN];
+};
+
+int hinic_init_comm_ch(struct hinic_hwdev *hwdev);
+
+void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev);
+
+int hinic_ppf_ext_db_init(void *dev);
+
+int hinic_ppf_ext_db_deinit(void *dev);
+
+enum hinic_set_arm_type {
+ HINIC_SET_ARM_CMDQ,
+ HINIC_SET_ARM_SQ,
+ HINIC_SET_ARM_TYPE_NUM,
+};
+
+int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id);
+
+void hinic_set_chip_present(void *hwdev);
+void hinic_force_complete_all(void *hwdev);
+
+void hinic_init_heartbeat(struct hinic_hwdev *hwdev);
+void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev);
+
+u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data);
+
+int hinic_enable_fast_recycle(void *hwdev, bool enable);
+int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag);
+
+enum l2nic_resource_type {
+ RES_TYPE_NIC_FUNC = 0,
+ RES_TYPE_FLUSH_BIT,
+ RES_TYPE_PF_BW_CFG,
+ RES_TYPE_MQM,
+ RES_TYPE_SMF,
+ RES_TYPE_CMDQ_ROOTCTX,
+ RES_TYPE_SQ_CI_TABLE,
+ RES_TYPE_CEQ,
+ RES_TYPE_MBOX,
+ RES_TYPE_AEQ,
+};
+
+void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state);
+
+int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit);
+
+void hinic_fault_work_handler(struct work_struct *work);
+void hinic_swe_fault_handler(struct hinic_hwdev *hwdev, u8 level,
+ u8 event, u64 val);
+
+bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd);
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
+int hinic_phy_init_status_judge(void *hwdev);
+
+int hinic_hilink_info_show(struct hinic_hwdev *hwdev);
+extern int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
+extern int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
+
+int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+#define HINIC_SDI_MODE_UNKNOWN 0
+#define HINIC_SDI_MODE_BM 1
+#define HINIC_SDI_MODE_VM 2
+#define HINIC_SDI_MODE_MAX 3
+int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode);
+
+void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
new file mode 100644
index 000000000000..2b3f66e30072
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
@@ -0,0 +1,800 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
+#define HINIC_SELFTEST_RESULT 0x883C
+
+/* For UEFI driver, this function can only read BAR0 */
+u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
+{
+ return be32_to_cpu(readl(hwif->cfg_regs_base + reg));
+}
+
+/* For UEFI driver, this function can only write BAR0 */
+void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val)
+{
+ writel(cpu_to_be32(val), hwif->cfg_regs_base + reg);
+}
+
+/**
+ * hwif_ready - test if the HW initialization passed
+ * @hwdev: the pointer to the hw device
+ * Return: 0 - success, negative - failure
+ **/
+static int hwif_ready(struct hinic_hwdev *hwdev)
+{
+ u32 addr, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwdev->hwif, addr);
+
+ if (attr1 == HINIC_PCIE_LINK_DOWN)
+ return -EBUSY;
+
+ if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwdev)) {
+ if (!HINIC_AF1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int wait_hwif_ready(struct hinic_hwdev *hwdev)
+{
+ ulong timeout = 0;
+
+ do {
+ if (!hwif_ready(hwdev))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n");
+ return -EBUSY;
+}
+
+/**
+ * set_hwif_attr - set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ * @attr0: the first attribute that was read from the hw
+ * @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
+ **/
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
+{
+ hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX);
+ hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX);
+ hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF);
+ hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE);
+
+ hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX);
+
+ hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC));
+ hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC));
+ hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC));
+ hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC));
+
+ hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
+}
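A brief note on the BIT() calls above: the AEQS/CEQS/IRQS/DMA_ATTR fields of
attr1 hold log2 counts, so BIT() recovers the real number. A minimal sketch
(field value is an example only):

	/* Sketch: the CSR stores log2(count); BIT(x) == 1U << x recovers it */
	u32 attr1 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR1_ADDR);
	u8 num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC));	/* field 2 -> 4 AEQs */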
+
+/**
+ * get_hwif_attr - read and set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void get_hwif_attr(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0, attr1, attr2;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
+}
+
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status)
+{
+ u32 attr5 = HINIC_AF5_SET(status, PF_STATUS);
+ u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR;
+
+ if (hwif->attr.func_type == TYPE_VF)
+ return;
+
+ hinic_hwif_write_reg(hwif, addr, attr5);
+}
+
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif)
+{
+ u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC_AF5_GET(attr5, PF_STATUS);
+}
+
+enum hinic_doorbell_ctrl hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+enum hinic_outbound_ctrl hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_AF4_GET(attr4, OUTBOUND_CTRL);
+}
+
+void hinic_enable_doorbell(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+void hinic_disable_doorbell(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+void hinic_enable_outbound(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL);
+ attr4 |= HINIC_AF4_SET(ENABLE_OUTBOUND, OUTBOUND_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+void hinic_disable_outbound(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL);
+ attr4 |= HINIC_AF4_SET(DISABLE_OUTBOUND, OUTBOUND_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/**
+ * set_ppf - try to set hwif as ppf and set the type of hwif in this case
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void set_ppf(struct hinic_hwif *hwif)
+{
+ struct hinic_func_attr *attr = &hwif->attr;
+ u32 addr, val, ppf_election;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_PPF_ELECTION_ADDR;
+
+ val = hinic_hwif_read_reg(hwif, addr);
+ val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
+
+ ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+ val |= ppf_election;
+
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ /* Check PPF */
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
+ if (attr->ppf_idx == attr->func_global_idx)
+ attr->func_type = TYPE_PPF;
+}
+
+/**
+ * get_mpf - get the mpf index into the hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void get_mpf(struct hinic_hwif *hwif)
+{
+ struct hinic_func_attr *attr = &hwif->attr;
+ u32 mpf_election, addr;
+
+ addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+ mpf_election = hinic_hwif_read_reg(hwif, addr);
+ attr->mpf_idx = HINIC_MPF_ELECTION_GET(mpf_election, IDX);
+}
+
+/**
+ * set_mpf - try to set hwif as mpf and set the mpf idx in hwif
+ * @hwif: the hardware interface of a pci function device
+ **/
+static void set_mpf(struct hinic_hwif *hwif)
+{
+ struct hinic_func_attr *attr = &hwif->attr;
+ u32 addr, val, mpf_election;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ val = HINIC_MPF_ELECTION_CLEAR(val, IDX);
+ mpf_election = HINIC_MPF_ELECTION_SET(attr->func_global_idx, IDX);
+
+ val |= mpf_election;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
+{
+ u32 i;
+
+ for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
+ free_db_area->db_idx[i] = i;
+
+ free_db_area->num_free = HINIC_DB_MAX_AREAS;
+
+ spin_lock_init(&free_db_area->idx_lock);
+}
+
+static int get_db_idx(struct hinic_hwif *hwif, u32 *idx)
+{
+ struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+ u32 pg_idx;
+
+ spin_lock(&free_db_area->idx_lock);
+
+retry:
+ if (free_db_area->num_free == 0) {
+ spin_unlock(&free_db_area->idx_lock);
+ return -ENOMEM;
+ }
+
+ free_db_area->num_free--;
+
+ pos = free_db_area->alloc_pos++;
+ pos &= HINIC_DB_MAX_AREAS - 1;
+
+ pg_idx = free_db_area->db_idx[pos];
+
+ free_db_area->db_idx[pos] = 0xFFFFFFFF;
+
+ /* pg_idx out of range */
+ if (pg_idx >= HINIC_DB_MAX_AREAS)
+ goto retry;
+
+ spin_unlock(&free_db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic_hwif *hwif, u32 idx)
+{
+ struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+
+ if (idx >= HINIC_DB_MAX_AREAS)
+ return;
+
+ spin_lock(&free_db_area->idx_lock);
+
+ pos = free_db_area->return_pos++;
+ pos &= HINIC_DB_MAX_AREAS - 1;
+
+ free_db_area->db_idx[pos] = idx;
+
+ free_db_area->num_free++;
+
+ spin_unlock(&free_db_area->idx_lock);
+}
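The free doorbell indexes form a fixed-size ring: get_db_idx() consumes at
alloc_pos, free_db_idx() produces at return_pos, and 0xFFFFFFFF marks a slot
already consumed so the retry label skips it. Note that masking with
HINIC_DB_MAX_AREAS - 1 assumes HINIC_DB_MAX_AREAS is a power of two. A minimal
usage sketch (hypothetical caller, error handling elided):

	u32 idx;

	if (!get_db_idx(hwif, &idx)) {
		/* idx is in [0, HINIC_DB_MAX_AREAS) and owned by this caller */
		free_db_idx(hwif, idx);	/* return it when done */
	}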
+
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base,
+ void __iomem *dwqe_base)
+{
+ struct hinic_hwif *hwif;
+ u32 idx;
+
+ if (!hwdev || !db_base)
+ return;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ idx = DB_IDX(db_base, hwif->db_base);
+
+ /* No need to unmap */
+
+ free_db_idx(hwif, idx);
+}
+EXPORT_SYMBOL(hinic_free_db_addr);
+
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hinic_hwif *hwif;
+ u64 offset;
+ u32 idx;
+ int err;
+
+ if (!hwdev || !db_base)
+ return -EINVAL;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return -EFAULT;
+
+ *db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE;
+
+ if (!dwqe_base)
+ return 0;
+
+ offset = ((u64)idx) << PAGE_SHIFT;
+
+ *dwqe_base = hwif->dwqe_mapping + offset;
+
+ if (!(*dwqe_base)) {
+ hinic_free_db_addr(hwdev, *db_base, NULL);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_db_addr);
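Each index maps to a fixed slice of the two mappings: the doorbell address is
db_base + idx * HINIC_DB_PAGE_SIZE, and the direct-wqe address is
dwqe_mapping + (idx << PAGE_SHIFT). A hedged caller sketch:

	/* Sketch: allocate a doorbell page plus its direct-wqe page, then free */
	void __iomem *db_base, *dwqe_base;

	if (!hinic_alloc_db_addr(hwdev, &db_base, &dwqe_base)) {
		/* ... ring doorbells via db_base, post direct wqes via dwqe_base ... */
		hinic_free_db_addr(hwdev, db_base, dwqe_base);
	}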
+
+void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base)
+{
+ struct hinic_hwif *hwif;
+ u32 idx;
+
+ if (!hwdev)
+ return;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ idx = DB_IDX(db_base, hwif->db_base_phy);
+
+ free_db_idx(hwif, idx);
+}
+EXPORT_SYMBOL(hinic_free_db_phy_addr);
+
+int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base)
+{
+ struct hinic_hwif *hwif;
+ u32 idx;
+ int err;
+
+ if (!hwdev || !db_base || !dwqe_base)
+ return -EINVAL;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return -EFAULT;
+
+ *db_base = hwif->db_base_phy + idx * HINIC_DB_PAGE_SIZE;
+ *dwqe_base = *db_base + HINIC_DB_DWQE_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_db_phy_addr);
+
+void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag)
+{
+ struct hinic_hwif *hwif;
+ u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+ HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+ if (!hwdev)
+ return;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+EXPORT_SYMBOL(hinic_set_msix_state);
+
+static void disable_all_msix(struct hinic_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE);
+}
+
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+ enum hinic_doorbell_ctrl states)
+{
+ enum hinic_doorbell_ctrl db_ctrl;
+ u32 cnt = 0;
+
+ if (!hwif)
+ return -EFAULT;
+
+ while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
+ db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+ if (db_ctrl == states)
+ return 0;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return -EFAULT;
+}
+EXPORT_SYMBOL(wait_until_doorbell_flush_states);
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif)
+{
+ enum hinic_doorbell_ctrl db_ctrl;
+ enum hinic_outbound_ctrl outbound_ctrl;
+ u32 cnt = 0;
+
+ while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
+ db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic_get_outbound_ctrl_status(hwif);
+
+ if (outbound_ctrl == ENABLE_OUTBOUND &&
+ db_ctrl == ENABLE_DOORBELL)
+ return 0;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return -EFAULT;
+}
+
+static void __print_selftest_reg(struct hinic_hwdev *hwdev)
+{
+ u32 addr, attr0, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwdev->hwif, addr);
+
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ sdk_err(hwdev->dev_hdl, "PCIE is link down\n");
+ return;
+ }
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwdev->hwif, addr);
+ if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF)
+ sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n",
+ hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_SELFTEST_RESULT));
+}
+
+/**
+ * hinic_init_hwif - initialize the hw interface
+ * @hwdev: the pointer to the hw device
+ * @cfg_reg_base: mapped base of the configuration registers (BAR0)
+ * @intr_reg_base: mapped base of the interrupt registers
+ * @db_base_phy: physical base address of the doorbell area
+ * @db_base: mapped base of the doorbell area
+ * @dwqe_mapping: mapped base of the direct wqe area
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, void *dwqe_mapping)
+{
+ struct hinic_hwif *hwif;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->pdev = hwdev->pcidev_hdl;
+
+ hwif->cfg_regs_base = cfg_reg_base;
+ hwif->intr_regs_base = intr_reg_base;
+ pr_info("init intr_regs_base=%p", hwif->intr_regs_base);
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->dwqe_mapping = dwqe_mapping;
+ init_db_area_idx(&hwif->free_db_area);
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Chip status is not ready\n");
+ __print_selftest_reg(hwdev);
+ goto hwif_ready_err;
+ }
+
+ get_hwif_attr(hwif);
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n");
+ goto hwif_ready_err;
+ }
+
+ if (!HINIC_IS_VF(hwdev)) {
+ set_ppf(hwif);
+
+ if (HINIC_IS_PPF(hwdev))
+ set_mpf(hwif);
+
+ get_mpf(hwif);
+ }
+
+ disable_all_msix(hwdev);
+
+ pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf:
%d\n",
+ hwif->attr.func_global_idx, hwif->attr.func_type,
+ hwif->attr.pci_intf_idx, hwif->attr.ppf_idx,
+ hwif->attr.mpf_idx);
+
+ return 0;
+
+hwif_ready_err:
+ spin_lock_deinit(&hwif->free_db_area.idx_lock);
+ kfree(hwif);
+
+ return err;
+}
+
+/**
+ * hinic_free_hwif - free the hw interface
+ * @hwdev: the pointer to the hw device
+ **/
+void hinic_free_hwif(struct hinic_hwdev *hwdev)
+{
+ spin_lock_deinit(&hwdev->hwif->free_db_area.idx_lock);
+ kfree(hwdev->hwif);
+}
+
+int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+ unsigned int flag,
+ struct hinic_dma_addr_align *mem_align)
+{
+ void *vaddr, *align_vaddr;
+ dma_addr_t paddr, align_paddr;
+ u64 real_size = size;
+
+ vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ /* align */
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev_hdl, real_size, vaddr, paddr);
+
+ /* realloc memory for align */
+ real_size = size + align;
+ vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr));
+
+out:
+ mem_align->real_size = (u32)real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
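The allocator tries the natural allocation first and only over-allocates by
`align` bytes when the returned address misses the alignment, which guarantees
an aligned address exists inside the larger buffer. A worked example with
hypothetical numbers:

	/* size = 4096, align = 4096:
	 * 1st alloc -> paddr = 0x10000800, ALIGN() = 0x10001000 != paddr, freed;
	 * 2nd alloc of 4096 + 4096 -> paddr = 0x20000800,
	 *   align_paddr = 0x20001000 (inside the 8192-byte buffer),
	 *   align_vaddr = vaddr + (0x20001000 - 0x20000800) = vaddr + 0x800.
	 */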
+
+void hinic_dma_free_coherent_align(void *dev_hdl,
+ struct hinic_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev_hdl, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+u16 hinic_global_func_id(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_global_idx;
+}
+EXPORT_SYMBOL(hinic_global_func_id);
+
+u16 hinic_intr_num(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.num_irqs;
+}
+EXPORT_SYMBOL(hinic_intr_num);
+
+u8 hinic_pf_id_of_vf(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.port_to_port_idx;
+}
+EXPORT_SYMBOL(hinic_pf_id_of_vf);
+
+u8 hinic_pcie_itf_id(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.pci_intf_idx;
+}
+EXPORT_SYMBOL(hinic_pcie_itf_id);
+
+u8 hinic_vf_in_pf(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.vf_in_pf;
+}
+EXPORT_SYMBOL(hinic_vf_in_pf);
+
+enum func_type hinic_func_type(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_type;
+}
+EXPORT_SYMBOL(hinic_func_type);
+
+u8 hinic_ceq_num(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.num_ceqs;
+}
+EXPORT_SYMBOL(hinic_ceq_num);
+
+u8 hinic_dma_attr_entry_num(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.num_dma_attr;
+}
+EXPORT_SYMBOL(hinic_dma_attr_entry_num);
+
+u16 hinic_glb_pf_vf_offset(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.global_vf_id_of_pf;
+}
+EXPORT_SYMBOL(hinic_glb_pf_vf_offset);
+
+u8 hinic_mpf_idx(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.mpf_idx;
+}
+EXPORT_SYMBOL(hinic_mpf_idx);
+
+u8 hinic_ppf_idx(void *hwdev)
+{
+ struct hinic_hwif *hwif;
+
+ if (!hwdev)
+ return 0;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.ppf_idx;
+}
+EXPORT_SYMBOL(hinic_ppf_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
new file mode 100644
index 000000000000..c9a89e99f246
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HWIF_H
+#define HINIC_HWIF_H
+
+#include "hinic_hwdev.h"
+
+#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 2000
+
+struct hinic_free_db_area {
+ u32 db_idx[HINIC_DB_MAX_AREAS];
+
+ u32 num_free;
+
+ u32 alloc_pos;
+ u32 return_pos;
+
+ /* spinlock for allocating doorbell area */
+ spinlock_t idx_lock;
+};
+
+struct hinic_func_attr {
+ u16 func_global_idx;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 vf_in_pf;
+ enum func_type func_type;
+
+ u8 mpf_idx;
+
+ u8 ppf_idx;
+
+ u16 num_irqs; /* max: 2 ^ 15 */
+ u8 num_aeqs; /* max: 2 ^ 3 */
+ u8 num_ceqs; /* max: 2 ^ 7 */
+
+ u8 num_dma_attr; /* max: 2 ^ 6 */
+
+ u16 global_vf_id_of_pf;
+};
+
+struct hinic_hwif {
+ u8 __iomem *cfg_regs_base;
+ u8 __iomem *intr_regs_base;
+ u64 db_base_phy;
+ u8 __iomem *db_base;
+
+ void __iomem *dwqe_mapping;
+ struct hinic_free_db_area free_db_area;
+
+ struct hinic_func_attr attr;
+
+ void *pdev;
+};
+
+struct hinic_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg);
+
+void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val);
+
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status);
+
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif);
+
+enum hinic_doorbell_ctrl
+ hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif);
+
+enum hinic_outbound_ctrl
+ hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif);
+
+void hinic_enable_doorbell(struct hinic_hwif *hwif);
+
+void hinic_disable_doorbell(struct hinic_hwif *hwif);
+
+void hinic_enable_outbound(struct hinic_hwif *hwif);
+
+void hinic_disable_outbound(struct hinic_hwif *hwif);
+
+int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, void *dwqe_mapping);
+
+void hinic_free_hwif(struct hinic_hwdev *hwdev);
+
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+ enum hinic_doorbell_ctrl states);
+
+int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+ unsigned int flag,
+ struct hinic_dma_addr_align *mem_align);
+
+void hinic_dma_free_coherent_align(void *dev_hdl,
+ struct hinic_dma_addr_align *mem_align);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c
b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
new file mode 100644
index 000000000000..8f64c6a6ebcd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
@@ -0,0 +1,2685 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/aer.h>
+#include <linux/debugfs.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_lld.h"
+#include "hinic_pci_id_tbl.h"
+#include "hinic_nic_dev.h"
+#include "hinic_sriov.h"
+#include "hinic_dbgtool_knl.h"
+#include "hinic_nictool.h"
+
+#define HINIC_PCI_CFG_REG_BAR 0
+#define HINIC_PCI_INTR_REG_BAR 2
+#define HINIC_PCI_DB_BAR 4
+#define HINIC_PCI_VENDOR_ID 0x19e5
+
+#define HINIC_DB_DWQE_SIZE 0x00080000
+
+#define SELF_TEST_BAR_ADDR_OFFSET 0x883c
+
+#define HINIC_SECOND_BASE (1000)
+#define HINIC_SYNC_YEAR_OFFSET (1900)
+#define HINIC_SYNC_MONTH_OFFSET (1)
+#define HINIC_MINUTE_BASE (60)
+#define HINIC_WAIT_TOOL_CNT_TIMEOUT 10000
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
+#define HINIC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver"
+#define HINICVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver"
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC_DRV_DESC);
+MODULE_VERSION(HINIC_DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE))
+static DEVICE_ATTR(sriov_numvfs, 0664,
+ hinic_sriov_numvfs_show, hinic_sriov_numvfs_store);
+static DEVICE_ATTR(sriov_totalvfs, 0444,
+ hinic_sriov_totalvfs_show, NULL);
+#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */
+
+static struct attribute *hinic_attributes[] = {
+#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE))
+ &dev_attr_sriov_numvfs.attr,
+ &dev_attr_sriov_totalvfs.attr,
+#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */
+ NULL
+};
+
+static const struct attribute_group hinic_attr_group = {
+ .attrs = hinic_attributes,
+};
+
+#ifdef CONFIG_PCI_IOV
+static bool disable_vf_load;
+module_param(disable_vf_load, bool, 0444);
+MODULE_PARM_DESC(disable_vf_load,
+ "Disable virtual functions probe or not - default is false");
+#endif /* CONFIG_PCI_IOV */
+
+enum {
+ HINIC_FUNC_IN_REMOVE = BIT(0),
+};
+
+/* Private structure of the pci device */
+struct hinic_pcidev {
+ struct pci_dev *pcidev;
+ void *hwdev;
+ struct card_node *chip_node;
+ struct hinic_lld_dev lld_dev;
+	/* Record the service object addresses,
+	 * such as hinic_dev, toe_dev and fc_dev
+	 */
+ void *uld_dev[SERVICE_T_MAX];
+ /* Record the service object name */
+ char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ];
+	/* Node in the global list the driver uses to manage
+	 * all function devices
+	 */
+ struct list_head node;
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ u64 db_base_phy;
+ void __iomem *db_base;
+
+ void __iomem *dwqe_mapping;
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ struct hinic_sriov_info sriov_info;
+
+ u32 init_state;
+	/* set while a uld driver is processing an event */
+ unsigned long state;
+ struct pci_device_id id;
+
+ unsigned long flag;
+
+ struct work_struct slave_nic_work;
+ bool nic_cur_enable;
+ bool nic_des_enable;
+};
+
+#define HINIC_EVENT_PROCESS_TIMEOUT 10000
+
+#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 1 : 0)
+#define SET_BIT(num, n) ((num) | (1UL << (n)))
+#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n))))
+
+#define MAX_CARD_ID 64
+static u64 card_bit_map;
+LIST_HEAD(g_hinic_chip_list);
+struct hinic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} };
+static const char *s_uld_name[SERVICE_T_MAX] = {
+ "nic", "ovs", "roce", "toe", "iwarp",
"fc", "fcoe", };
+
+enum hinic_lld_status {
+ HINIC_NODE_CHANGE = BIT(0),
+};
+
+struct hinic_lld_lock {
+ /* lock for chip list */
+ struct mutex lld_mutex;
+ unsigned long status;
+ atomic_t dev_ref_cnt;
+};
+
+struct hinic_lld_lock g_lld_lock;
+
+#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10 minutes */
+#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10 minutes */
+#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2 minutes */
+
+/* Nodes in chip_node are about to change; tools and drivers must not
+ * get a node while the change is in progress
+ */
+static void lld_lock_chip_node(void)
+{
+ u32 loop_cnt;
+
+ mutex_lock(&g_lld_lock.lld_mutex);
+
+ loop_cnt = 0;
+ while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
+ if (!test_and_set_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+ break;
+
+ loop_cnt++;
+
+ if (loop_cnt % 10000 == 0)
+ pr_warn("Wait for lld node change complete for %us\n",
+ loop_cnt / 1000);
+
+ usleep_range(900, 1000);
+ }
+
+ if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
+ pr_warn("Wait for lld node change complete timeout when try to get lld
lock\n");
+
+ loop_cnt = 0;
+ while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
+ if (!atomic_read(&g_lld_lock.dev_ref_cnt))
+ break;
+
+ loop_cnt++;
+
+ if (loop_cnt % 10000 == 0)
+ pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
+ loop_cnt / 1000,
+ atomic_read(&g_lld_lock.dev_ref_cnt));
+
+ usleep_range(900, 1000);
+ }
+
+ if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
+ pr_warn("Wait for lld dev unused timeout\n");
+
+ mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+static void lld_unlock_chip_node(void)
+{
+ clear_bit(HINIC_NODE_CHANGE, &g_lld_lock.status);
+}
+
+/* When tools or other drivers want to get a node of chip_node, use this
+ * function to prevent the node from being freed
+ */
+static void lld_dev_hold(void)
+{
+ u32 loop_cnt = 0;
+
+	/* ensure no chip node is currently being changed */
+ mutex_lock(&g_lld_lock.lld_mutex);
+
+ while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
+ if (!test_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+ break;
+
+ loop_cnt++;
+
+ if (loop_cnt % 10000 == 0)
+ pr_warn("Wait lld node change complete for %us\n",
+ loop_cnt / 1000);
+
+ usleep_range(900, 1000);
+ }
+
+ if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
+ pr_warn("Wait lld node change complete timeout when try to hode lld dev\n");
+
+ atomic_inc(&g_lld_lock.dev_ref_cnt);
+
+ mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+static void lld_dev_put(void)
+{
+ atomic_dec(&g_lld_lock.dev_ref_cnt);
+}
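Taken together, these four helpers form a writer/reader protocol over
g_hinic_chip_list: probe/remove paths take the exclusive side, everything else
pins the list while walking it. A minimal usage sketch:

	/* Writer side: adding or removing a chip node */
	lld_lock_chip_node();
	/* ... list_add()/list_del() on g_hinic_chip_list ... */
	lld_unlock_chip_node();

	/* Reader side: tools or drivers walking the list */
	lld_dev_hold();
	/* ... list_for_each_entry() over g_hinic_chip_list ... */
	lld_dev_put();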
+
+static void hinic_lld_lock_init(void)
+{
+ mutex_init(&g_lld_lock.lld_mutex);
+ atomic_set(&g_lld_lock.dev_ref_cnt, 0);
+}
+
+static atomic_t tool_used_cnt;
+
+void hinic_tool_cnt_inc(void)
+{
+ atomic_inc(&tool_used_cnt);
+}
+
+void hinic_tool_cnt_dec(void)
+{
+ atomic_dec(&tool_used_cnt);
+}
+
+static int attach_uld(struct hinic_pcidev *dev, enum hinic_service_type type,
+ struct hinic_uld_info *uld_info)
+{
+ void *uld_dev = NULL;
+ int err;
+
+ mutex_lock(&dev->pdev_mutex);
+
+ if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) {
+ sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach
uld\n");
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (dev->uld_dev[type]) {
+ sdk_err(&dev->pcidev->dev,
+ "%s driver has attached to pcie device\n",
+ s_uld_name[type]);
+ err = 0;
+ goto out_unlock;
+ }
+
+ if ((hinic_get_func_mode(dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
+ type == SERVICE_T_OVS && !hinic_support_ovs(dev->hwdev, NULL)) {
+ sdk_warn(&dev->pcidev->dev, "Dev not support %s\n",
+ s_uld_name[type]);
+ err = 0;
+ goto out_unlock;
+ }
+
+	err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]);
+ if (err || !uld_dev) {
+ sdk_err(&dev->pcidev->dev,
+ "Failed to add object for %s driver to pcie device\n",
+ s_uld_name[type]);
+ goto probe_failed;
+ }
+
+ dev->uld_dev[type] = uld_dev;
+ mutex_unlock(&dev->pdev_mutex);
+
+ sdk_info(&dev->pcidev->dev,
+ "Attach %s driver to pcie device succeed\n", s_uld_name[type]);
+ return 0;
+
+probe_failed:
+out_unlock:
+ mutex_unlock(&dev->pdev_mutex);
+
+ return err;
+}
+
+static void detach_uld(struct hinic_pcidev *dev, enum hinic_service_type type)
+{
+ struct hinic_uld_info *uld_info = &g_uld_info[type];
+ u32 cnt = 0;
+
+ mutex_lock(&dev->pdev_mutex);
+ if (!dev->uld_dev[type]) {
+ mutex_unlock(&dev->pdev_mutex);
+ return;
+ }
+
+ while (cnt < HINIC_EVENT_PROCESS_TIMEOUT) {
+ if (!test_and_set_bit(type, &dev->state))
+ break;
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ uld_info->remove(&dev->lld_dev, dev->uld_dev[type]);
+ dev->uld_dev[type] = NULL;
+ if (cnt < HINIC_EVENT_PROCESS_TIMEOUT)
+ clear_bit(type, &dev->state);
+
+ sdk_info(&dev->pcidev->dev,
+ "Detach %s driver from pcie device succeed\n",
+ s_uld_name[type]);
+ mutex_unlock(&dev->pdev_mutex);
+}
+
+#ifndef __HIFC_PANGEA__
+static void attach_ulds(struct hinic_pcidev *dev)
+{
+ enum hinic_service_type type;
+
+ for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) {
+		/* TODO: get the service feature from hw mgmt here and
+		 * check whether this pcie function should run the driver
+		 */
+ if (g_uld_info[type].probe)
+ attach_uld(dev, type, &g_uld_info[type]);
+ }
+}
+#endif
+
+static void detach_ulds(struct hinic_pcidev *dev)
+{
+ enum hinic_service_type type;
+
+ for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) {
+ if (g_uld_info[type].probe)
+ detach_uld(dev, type);
+ }
+}
+
+int hinic_register_uld(enum hinic_service_type type,
+ struct hinic_uld_info *uld_info)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (type >= SERVICE_T_MAX) {
+ pr_err("Unknown type %d of up layer driver to register\n",
+ type);
+ return -EINVAL;
+ }
+
+ if (!uld_info || !uld_info->probe || !uld_info->remove) {
+ pr_err("Invalid information of %s driver to register\n",
+ s_uld_name[type]);
+ return -EINVAL;
+ }
+
+ lld_dev_hold();
+
+ if (g_uld_info[type].probe) {
+ pr_err("%s driver has registered\n", s_uld_name[type]);
+ lld_dev_put();
+ return -EINVAL;
+ }
+
+ memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info));
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (attach_uld(dev, type, uld_info)) {
+ sdk_err(&dev->pcidev->dev,
+ "Attach %s driver to pcie device failed\n",
+ s_uld_name[type]);
+ continue;
+ }
+ }
+ }
+
+ lld_dev_put();
+
+ pr_info("Register %s driver succeed\n", s_uld_name[type]);
+ return 0;
+}
+EXPORT_SYMBOL(hinic_register_uld);
+
+void hinic_unregister_uld(enum hinic_service_type type)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+ struct hinic_uld_info *uld_info;
+
+ if (type >= SERVICE_T_MAX) {
+ pr_err("Unknown type %d of up layer driver to unregister\n",
+ type);
+ return;
+ }
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ detach_uld(dev, type);
+ }
+ }
+
+ uld_info = &g_uld_info[type];
+ memset(uld_info, 0, sizeof(*uld_info));
+ lld_dev_put();
+}
+EXPORT_SYMBOL(hinic_unregister_uld);
+
+static void hinic_sync_time_to_fmw(struct hinic_pcidev *pdev_pri)
+{
+ struct timeval tv = {0};
+ struct rtc_time rt_time = {0};
+ u64 tv_msec;
+ int err;
+
+ do_gettimeofday(&tv);
+
+ tv_msec = tv.tv_sec * HINIC_SECOND_BASE +
+ tv.tv_usec / HINIC_SECOND_BASE;
+ err = hinic_sync_time(pdev_pri->hwdev, tv_msec);
+ if (err) {
+ sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware
failed, errno:%d.\n",
+ err);
+ } else {
+ rtc_time_to_tm(tv.tv_sec, &rt_time);
+ sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware
succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n",
+ rt_time.tm_year + HINIC_SYNC_YEAR_OFFSET,
+ rt_time.tm_mon + HINIC_SYNC_MONTH_OFFSET,
+ rt_time.tm_mday, rt_time.tm_hour,
+ rt_time.tm_min, rt_time.tm_sec);
+ }
+}
+
+enum hinic_ver_incompat_mode {
+	/* New driver is not compatible with old firmware */
+	VER_INCOMP_NEW_DRV_OLD_FW,
+	/* New firmware is not compatible with old driver */
+ VER_INCOMP_NEW_FW_OLD_DRV,
+};
+
+struct hinic_version_incompat {
+ char *version;
+ char *advise;
+ u32 incompat_mode;
+};
+
+struct hinic_version_incompat ver_incompat_table[] = {
+ {
+ .version = "1.2.2.0",
+ .advise = "Mechanism of cos changed",
+ .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
+ },
+ {
+ .version = "1.2.3.0",
+ .advise = "Driver get sevice mode from firmware",
+ .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
+ },
+};
+
+#define MAX_VER_FIELD_LEN 4
+#define MAX_VER_SPLIT_NUM 4
+static void __version_split(const char *str, int *split_num,
+ char rst[][MAX_VER_FIELD_LEN])
+{
+ const char delim = '.';
+ const char *src;
+ int cnt = 0;
+ u16 idx, end, token_len;
+
+ idx = 0;
+ while (idx < strlen(str)) {
+ for (end = idx; end < strlen(str); end++) {
+ if (*(str + end) == delim)
+				break; /* found */
+ }
+
+ if (end != idx) {
+ token_len = min_t(u16, end - idx,
+ MAX_VER_FIELD_LEN - 1);
+ src = str + idx;
+ memcpy(rst[cnt], src, token_len);
+ if (++cnt >= MAX_VER_SPLIT_NUM)
+ break;
+ }
+
+ idx = end + 1; /* skip delim */
+ }
+
+ *split_num = cnt;
+}
+
+int hinic_version_cmp(char *ver1, char *ver2)
+{
+ char ver1_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+ char ver2_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+ int split1_num, split2_num;
+ int ver1_num, ver2_num;
+ int split;
+
+	/* For compatibility with older firmware versions: B017/B018 */
+ if (ver1[0] == 'B')
+ return -1;
+
+ if (ver2[0] == 'B')
+ return 1;
+
+ __version_split(ver1, &split1_num, ver1_split);
+ __version_split(ver2, &split2_num, ver2_split);
+
+ if (split1_num != MAX_VER_SPLIT_NUM ||
+ split2_num != MAX_VER_SPLIT_NUM) {
+ pr_err("Invalid version %s or %s\n", ver1, ver2);
+ return 0;
+ }
+
+ for (split = 0; split < MAX_VER_SPLIT_NUM; split++) {
+ ver1_num = local_atoi(ver1_split[split]);
+ ver2_num = local_atoi(ver2_split[split]);
+
+ if (ver1_num > ver2_num)
+ return 1;
+ else if (ver1_num < ver2_num)
+ return -1;
+ }
+
+ return 0;
+}
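A few hedged examples of the comparison rules above (expected return values in
the comments):

	hinic_version_cmp("1.2.3.0", "1.2.2.0");	/* 1: first is newer */
	hinic_version_cmp("1.2.2.0", "1.2.3.0");	/* -1 */
	hinic_version_cmp("B017", "1.2.2.0");		/* -1: 'B' builds sort oldest */
	hinic_version_cmp("1.2", "1.2.2.0");		/* 0: malformed, logs an error */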
+
+static int __version_mismatch(struct hinic_pcidev *pcidev, char *cur_fw_ver,
+ char *cur_drv_ver,
+ struct hinic_version_incompat *ver_incompat,
+ int start_entry)
+{
+ struct hinic_version_incompat *ver_incmp_tmp;
+ int fw_ver_comp;
+ int i, num_entry;
+
+ fw_ver_comp = hinic_version_cmp(cur_fw_ver, ver_incompat->version);
+ if (fw_ver_comp <= 0) {
+		/* Check if the new driver is compatible with the old fw */
+ for (i = start_entry; i >= 0; i--) {
+ ver_incmp_tmp = &ver_incompat_table[i];
+ if (hinic_version_cmp(cur_fw_ver,
+ ver_incmp_tmp->version) >= 0)
+				break; /* no need to check further */
+
+ if (ver_incmp_tmp->incompat_mode &
+ BIT(VER_INCOMP_NEW_DRV_OLD_FW)) {
+ sdk_err(&pcidev->pcidev->dev,
+ "Version incompatible: %s, please update firmware to %s, or use %s
driver\n",
+ ver_incmp_tmp->advise,
+ cur_drv_ver, cur_fw_ver);
+ return -EINVAL;
+ }
+ }
+
+ goto compatible;
+ }
+
+	/* Check if the old driver is compatible with the new firmware */
+ num_entry = (int)sizeof(ver_incompat_table) /
+ (int)sizeof(ver_incompat_table[0]);
+ for (i = start_entry + 1; i < num_entry; i++) {
+ ver_incmp_tmp = &ver_incompat_table[i];
+
+ if (hinic_version_cmp(cur_fw_ver, ver_incmp_tmp->version) < 0)
+			break; /* no need to check further */
+
+ if (ver_incmp_tmp->incompat_mode &
+ BIT(VER_INCOMP_NEW_FW_OLD_DRV)) {
+ sdk_err(&pcidev->pcidev->dev,
+ "Version incompatible: %s, please update driver to %s, or use %s
firmware\n",
+ ver_incmp_tmp->advise,
+ cur_fw_ver, cur_drv_ver);
+ return -EINVAL;
+ }
+ }
+
+compatible:
+ if (hinic_version_cmp(cur_drv_ver, cur_fw_ver) < 0)
+ sdk_info(&pcidev->pcidev->dev,
+ "Firmware newer than driver, you'd better update driver to %s\n",
+ cur_fw_ver);
+ else
+ sdk_info(&pcidev->pcidev->dev,
+ "Driver newer than firmware, you'd better update firmware to %s\n",
+ cur_drv_ver);
+
+ return 0;
+}
+
+static void hinic_ignore_minor_version(char *version)
+{
+ char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+ int max_ver_len, split_num = 0;
+
+ __version_split(version, &split_num, ver_split);
+ if (split_num != MAX_VER_SPLIT_NUM)
+ return;
+
+ max_ver_len = (int)strlen(version) + 1;
+ memset(version, 0, max_ver_len);
+
+ snprintf(version, max_ver_len, "%s.%s.%s.0",
+ ver_split[0], ver_split[1], ver_split[2]);
+}
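For example (a sketch; the buffer must be writable and NUL-terminated):

	char ver[] = "1.6.2.5";

	hinic_ignore_minor_version(ver);	/* ver is now "1.6.2.0" */
	/* "1.6.2" would be left untouched: it does not split into 4 fields */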
+
+static int hinic_detect_version_compatible(struct hinic_pcidev *pcidev)
+{
+ struct hinic_fw_version fw_ver = { {0} };
+ struct hinic_version_incompat *ver_incompat;
+ char drv_ver[MAX_VER_SPLIT_NUM * MAX_VER_FIELD_LEN] = {0};
+ int idx, num_entry, drv_ver_len;
+ int ver_mismatch;
+ int err;
+
+ err = hinic_get_fw_version(pcidev->hwdev, &fw_ver);
+ if (err) {
+ sdk_err(&pcidev->pcidev->dev,
+ "Failed to get firmware version\n");
+ return err;
+ }
+
+ drv_ver_len = min_t(int, (int)sizeof(drv_ver) - 1,
+ (int)strlen(HINIC_DRV_VERSION));
+ memcpy(drv_ver, HINIC_DRV_VERSION, drv_ver_len);
+
+ sdk_info(&pcidev->pcidev->dev, "Version info: driver %s, firmware
%s\n",
+ drv_ver, fw_ver.mgmt_ver);
+
+ hinic_ignore_minor_version(fw_ver.mgmt_ver);
+ hinic_ignore_minor_version(drv_ver);
+ ver_mismatch = hinic_version_cmp(drv_ver, fw_ver.mgmt_ver);
+ if (!ver_mismatch)
+ return 0;
+
+ num_entry = (int)sizeof(ver_incompat_table) /
+ (int)sizeof(ver_incompat_table[0]);
+ for (idx = num_entry - 1; idx >= 0; idx--) {
+ ver_incompat = &ver_incompat_table[idx];
+
+ if (hinic_version_cmp(drv_ver, ver_incompat->version) < 0)
+ continue;
+
+		/* Found an older version of the driver in the table */
+ return __version_mismatch(pcidev, fw_ver.mgmt_ver, drv_ver,
+ ver_incompat, idx);
+ }
+
+ return 0;
+}
+
+struct mctp_hdr {
+ u16 resp_code;
+ u16 reason_code;
+ u32 manufacture_id;
+
+ u8 cmd_rsvd;
+ u8 major_cmd;
+ u8 sub_cmd;
+ u8 spc_field;
+};
+
+struct mctp_drv_info {
+	struct mctp_hdr hdr;	/* spc_field: driver is valid, always 0 */
+ u8 drv_name[32];
+ u8 drv_ver[MAX_VER_SPLIT_NUM];
+};
+
+struct mctp_bdf_info {
+ struct mctp_hdr hdr; /* spc_field: pf index */
+ u8 rsvd;
+ u8 bus;
+ u8 device;
+ u8 function;
+};
+
+struct ipaddr_info {
+ u8 ip[16];
+ u8 prefix; /* netmask */
+ u8 rsvd[3];
+};
+
+#define MCTP_HOST_MAX_IP_ADDR 8
+#define MCTP_IP_TYPE_V4 0U
+#define MCTP_IP_TYPE_V6 1U
+
+struct mctp_ipaddrs_info {
+ struct mctp_hdr hdr; /* spc_field: pf index */
+ u16 ip_cnt;
+ u8 ip_type_bitmap;
+ u8 rsvd;
+
+ struct ipaddr_info ip[MCTP_HOST_MAX_IP_ADDR];
+};
+
+enum mctp_resp_code {
+ /* COMMAND_COMPLETED = 0, */
+ COMMAND_FAILED = 1,
+ /* COMMAND_UNAVALILABLE = 2, */
+ COMMAND_UNSUPPORTED = 3,
+};
+
+enum mctp_reason_code {
+ /* OEM_CMD_HEAD_INFO_INVALID = 0x8002, */
+ OEM_GET_INFO_FAILED = 0x8003,
+};
+
+static void __mctp_set_hdr(struct mctp_hdr *hdr,
+ struct hinic_mctp_host_info *mctp_info)
+{
+ u32 manufacture_id = 0x07DB;
+
+ hdr->cmd_rsvd = 0;
+ hdr->major_cmd = mctp_info->major_cmd;
+ hdr->sub_cmd = mctp_info->sub_cmd;
+ hdr->manufacture_id = cpu_to_be32(manufacture_id);
+ hdr->resp_code = cpu_to_be16(hdr->resp_code);
+ hdr->reason_code = cpu_to_be16(hdr->reason_code);
+}
+
+static void __mctp_get_drv_info(struct hinic_pcidev *pci_adapter,
+ struct hinic_mctp_host_info *mctp_info)
+{
+ struct mctp_drv_info *drv_info = mctp_info->data;
+ char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+ int split_num = 0, split;
+ u8 ver_i[MAX_VER_SPLIT_NUM] = {0};
+ int drv_name_len;
+
+ __version_split(HINIC_DRV_VERSION, &split_num, ver_split);
+ for (split = 0; split < split_num; split++)
+ ver_i[split] = (u8)local_atoi(ver_split[split]);
+
+ drv_name_len = (int)strlen(HINIC_DRV_NAME);
+ memcpy(drv_info->drv_name, HINIC_DRV_NAME, drv_name_len);
+ memcpy(drv_info->drv_ver, ver_i, sizeof(drv_info->drv_ver));
+
+ memset(&drv_info->hdr, 0, sizeof(drv_info->hdr));
+ __mctp_set_hdr(&drv_info->hdr, mctp_info);
+
+ mctp_info->data_len = sizeof(*drv_info);
+}
+
+static void __mctp_get_bdf(struct hinic_pcidev *pci_adapter,
+ struct hinic_mctp_host_info *mctp_info)
+{
+ struct pci_dev *pdev = pci_adapter->pcidev;
+ struct mctp_bdf_info *bdf_info = mctp_info->data;
+
+ bdf_info->bus = pdev->bus->number;
+ bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */
+ bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */
+
+ memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr));
+ __mctp_set_hdr(&bdf_info->hdr, mctp_info);
+ bdf_info->hdr.spc_field = (u8)hinic_global_func_id(pci_adapter->hwdev);
+
+ mctp_info->data_len = sizeof(*bdf_info);
+}
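The devfn shifts above match the kernel's generic PCI helpers for any valid
devfn: the device number lives in bits 7:3 and the function number in bits 2:0.
An equivalent decoding:

	/* Sketch: same result via the standard macros */
	bdf_info->device = PCI_SLOT(pdev->devfn);	/* ((devfn) >> 3) & 0x1f */
	bdf_info->function = PCI_FUNC(pdev->devfn);	/* (devfn) & 0x07 */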
+
+static void __copy_ipv6(u32 *dst, __be32 *src)
+{
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst = *src;
+}
+
+static void __mctp_get_ipaddr(struct hinic_pcidev *pci_adapter,
+ struct hinic_mctp_host_info *mctp_info)
+{
+ struct mctp_ipaddrs_info *ip_info = mctp_info->data;
+ struct ipaddr_info *ip;
+ struct hinic_nic_dev *nic_dev;
+ struct net_device *netdev;
+ struct in_device *in_dev;
+ struct inet6_dev *in6_dev;
+ struct inet6_ifaddr *ifp;
+#ifdef HAVE_INET6_IFADDR_LIST
+ struct inet6_ifaddr *tmp;
+#endif
+ u16 ip_cnt = 0;
+ bool got_lock = true;
+
+ memset(&ip_info->hdr, 0, sizeof(ip_info->hdr));
+
+ nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+ if (!hinic_support_nic(pci_adapter->hwdev, NULL) || !nic_dev) {
+ ip_info->hdr.resp_code = COMMAND_FAILED;
+ ip_info->hdr.reason_code = OEM_GET_INFO_FAILED;
+ goto out;
+ }
+
+ netdev = nic_dev->netdev;
+
+ if (!rtnl_trylock())
+ got_lock = false;
+
+ in_dev = in_dev_get(netdev);
+ if (in_dev) {
+ for_ifa(in_dev) {
+ if (ip_cnt < MCTP_HOST_MAX_IP_ADDR) {
+ ip = &ip_info->ip[ip_cnt];
+ *((u32 *)(ip->ip)) = ifa->ifa_address;
+ ip->prefix = ifa->ifa_prefixlen;
+ ip_info->ip_type_bitmap |=
+ (u8)(MCTP_IP_TYPE_V4 << ip_cnt);
+ }
+
+ ip_cnt++;
+ } endfor_ifa(in_dev);
+ in_dev_put(in_dev);
+ }
+
+ if (got_lock)
+ rtnl_unlock();
+
+ in6_dev = __in6_dev_get(netdev);
+ if (!in6_dev)
+ goto out;
+
+ read_lock_bh(&in6_dev->lock);
+
+#ifdef HAVE_INET6_IFADDR_LIST
+ list_for_each_entry_safe(ifp, tmp, &in6_dev->addr_list, if_list) {
+#else
+ for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
+#endif
+ if (ip_cnt < MCTP_HOST_MAX_IP_ADDR) {
+ ip = &ip_info->ip[ip_cnt];
+ __copy_ipv6((u32 *)(ip->ip), ifp->addr.in6_u.u6_addr32);
+ ip->prefix = (u8)ifp->prefix_len;
+ ip_info->ip_type_bitmap |=
+ (u8)(MCTP_IP_TYPE_V6 << ip_cnt);
+ }
+
+ ip_cnt++;
+ }
+
+ read_unlock_bh(&in6_dev->lock);
+
+out:
+ ip_info->ip_cnt = cpu_to_be16(ip_cnt);
+ __mctp_set_hdr(&ip_info->hdr, mctp_info);
+ ip_info->hdr.spc_field = (u8)hinic_global_func_id(pci_adapter->hwdev);
+
+ mctp_info->data_len = sizeof(*ip_info);
+}
+
+#define MCTP_MAJOR_CMD_PUBLIC 0x0
+#define MCTP_MAJOR_CMD_NIC 0x1
+
+#define MCTP_PUBLIC_SUB_CMD_BDF 0x1
+#define MCTP_PUBLIC_SUB_CMD_DRV 0x4
+
+#define MCTP_NIC_SUB_CMD_IP 0x1
+
+static void __mctp_get_host_info(struct hinic_pcidev *dev,
+ struct hinic_mctp_host_info *mctp_info)
+{
+ struct mctp_hdr *hdr;
+
+ switch ((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) {
+ case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_BDF):
+ __mctp_get_bdf(dev, mctp_info);
+ break;
+
+ case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_DRV):
+ __mctp_get_drv_info(dev, mctp_info);
+ break;
+
+ case (MCTP_MAJOR_CMD_NIC << 8 | MCTP_NIC_SUB_CMD_IP):
+ __mctp_get_ipaddr(dev, mctp_info);
+ break;
+
+ default:
+ hdr = mctp_info->data;
+ hdr->reason_code = COMMAND_UNSUPPORTED;
+ __mctp_set_hdr(hdr, mctp_info);
+ mctp_info->data_len = sizeof(*hdr);
+ break;
+ }
+}
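The dispatch key packs major_cmd into the high byte and sub_cmd into the low
byte, so each case label is simply a 16-bit command id. For instance, the NIC
"get IP" request resolves to 0x0101:

	u16 key = ((u16)MCTP_MAJOR_CMD_NIC << 8) | MCTP_NIC_SUB_CMD_IP;	/* 0x0101 */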
+
+static bool __is_pcidev_match_chip_name(char *ifname, struct hinic_pcidev *dev,
+ struct card_node *chip_node,
+ enum func_type type)
+{
+ if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) {
+ if (type == TYPE_UNKNOWN) {
+ if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED)
+ return false;
+ } else {
+ if (dev->init_state >=
+ HINIC_INIT_STATE_HW_PART_INITED &&
+ hinic_func_type(dev->hwdev) != type)
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct hinic_pcidev *_get_pcidev_by_chip_name(char *ifname,
+ enum func_type type)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (__is_pcidev_match_chip_name(ifname, dev, chip_node,
+ type)) {
+ lld_dev_put();
+ return dev;
+ }
+ }
+ }
+
+ lld_dev_put();
+
+ return NULL;
+}
+
+static struct hinic_pcidev *hinic_get_pcidev_by_chip_name(char *ifname)
+{
+ struct hinic_pcidev *dev, *dev_hw_init;
+
+	/* find the hw-initialized device first */
+ dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN);
+ if (dev_hw_init) {
+ if (hinic_func_type(dev_hw_init->hwdev) == TYPE_PPF)
+ return dev_hw_init;
+ }
+
+ dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF);
+ if (dev) {
+ if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
+ return dev_hw_init;
+
+ return dev;
+ }
+
+ dev = _get_pcidev_by_chip_name(ifname, TYPE_PF);
+ if (dev) {
+ if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
+ return dev_hw_init;
+
+ return dev;
+ }
+
+ dev = _get_pcidev_by_chip_name(ifname, TYPE_VF);
+ if (dev)
+ return dev;
+
+ return NULL;
+}
+
+static bool __is_pcidev_match_dev_name(char *ifname, struct hinic_pcidev *dev,
+ enum hinic_service_type type)
+{
+ struct hinic_nic_dev *nic_dev;
+ enum hinic_service_type i;
+
+ if (type == SERVICE_T_MAX) {
+ for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) {
+ if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ))
+ return true;
+ }
+ } else {
+ if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ))
+ return true;
+ }
+
+ nic_dev = dev->uld_dev[SERVICE_T_NIC];
+ if (nic_dev) {
+ if (!strncmp(nic_dev->netdev->name, ifname, IFNAMSIZ))
+ return true;
+ }
+
+ return false;
+}
+
+static struct hinic_pcidev *
+ hinic_get_pcidev_by_dev_name(char *ifname, enum hinic_service_type type)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (__is_pcidev_match_dev_name(ifname, dev, type)) {
+ lld_dev_put();
+ return dev;
+ }
+ }
+ }
+ lld_dev_put();
+
+ return NULL;
+}
+
+static struct hinic_pcidev *hinic_get_pcidev_by_ifname(char *ifname)
+{
+ struct hinic_pcidev *dev;
+
+	/* Support searching for the hwdev by chip name, net device name,
+	 * or fc device name
+	 */
+	/* Find the pcidev by chip_name first */
+ dev = hinic_get_pcidev_by_chip_name(ifname);
+ if (dev)
+ return dev;
+
+	/* If ifname is not a chip name,
+	 * find the pcidev by FC name or net device name
+	 */
+ return hinic_get_pcidev_by_dev_name(ifname, SERVICE_T_MAX);
+}
+
+int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (!hwdev || !ifname)
+ return -EINVAL;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (dev->hwdev == hwdev) {
+ strncpy(ifname, chip_node->chip_name,
+ IFNAMSIZ - 1);
+ ifname[IFNAMSIZ - 1] = 0;
+ lld_dev_put();
+ return 0;
+ }
+ }
+ }
+ lld_dev_put();
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL(hinic_get_chip_name_by_hwdev);
+
+static struct card_node *hinic_get_chip_node_by_hwdev(void *hwdev)
+{
+ struct card_node *chip_node = NULL;
+ struct card_node *node_tmp = NULL;
+ struct hinic_pcidev *dev;
+
+ if (!hwdev)
+ return NULL;
+
+ lld_dev_hold();
+ list_for_each_entry(node_tmp, &g_hinic_chip_list, node) {
+ if (!chip_node) {
+ list_for_each_entry(dev, &node_tmp->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (dev->hwdev == hwdev) {
+ chip_node = node_tmp;
+ break;
+ }
+ }
+ }
+ }
+
+ lld_dev_put();
+
+ return chip_node;
+}
+
+int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[])
+{
+ struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+ struct card_node *chip_node;
+ u32 cnt;
+
+ if (!dev || !hinic_support_nic(dev->hwdev, NULL))
+ return -EINVAL;
+
+ lld_dev_hold();
+
+ cnt = 0;
+ chip_node = dev->chip_node;
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+ continue;
+
+ if (HINIC_FUNC_IS_VF(dev->hwdev))
+ continue;
+
+ array[cnt] = dev->uld_dev[SERVICE_T_NIC];
+ cnt++;
+ }
+ lld_dev_put();
+
+ *dev_cnt = cnt;
+
+ return 0;
+}
+
+int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, u8 *cos_up)
+{
+ struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+ struct card_node *chip_node;
+
+ if (!dev)
+ return -EINVAL;
+
+ chip_node = dev->chip_node;
+ *is_setted = chip_node->cos_up_setted;
+ if (chip_node->cos_up_setted)
+ memcpy(cos_up, chip_node->cos_up, sizeof(chip_node->cos_up));
+
+ return 0;
+}
+
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up)
+{
+ struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+ struct card_node *chip_node;
+
+ if (!dev)
+ return -EINVAL;
+
+ chip_node = dev->chip_node;
+ chip_node->cos_up_setted = true;
+ memcpy(chip_node->cos_up, cos_up, sizeof(chip_node->cos_up));
+
+ return 0;
+}
+
+void *hinic_get_hwdev_by_ifname(char *ifname)
+{
+ struct hinic_pcidev *dev;
+
+ dev = hinic_get_pcidev_by_ifname(ifname);
+ if (dev)
+ return dev->hwdev;
+
+ return NULL;
+}
+
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type)
+{
+ struct hinic_pcidev *dev;
+
+ if (type >= SERVICE_T_MAX) {
+		pr_err("Service type: %d is invalid\n", type);
+ return NULL;
+ }
+
+ dev = hinic_get_pcidev_by_dev_name(ifname, type);
+ if (dev)
+ return dev->uld_dev[type];
+
+ return NULL;
+}
+
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type)
+{
+ struct hinic_pcidev *dev;
+
+ /* support search hwdev by chip name, net device name,
+ * or fc device name
+ */
+ /* Find pcidev by chip_name first */
+ dev = hinic_get_pcidev_by_chip_name(ifname);
+ if (dev)
+ return dev->uld_dev[type];
+
+ return NULL;
+}
+
+/* NOTICE: nictool must not use this function, because it cannot keep the
+ * tool context mutually exclusive with the remove context
+ */
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+ enum hinic_service_type type)
+{
+ struct hinic_pcidev *pci_adapter;
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (!pdev)
+ return NULL;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ if (!pci_adapter)
+ return NULL;
+
+ chip_node = pci_adapter->chip_node;
+ lld_dev_hold();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+		/* can't test the HINIC_FUNC_IN_REMOVE bit in dev->flag here,
+		 * because TOE calls this function while the toe driver is
+		 * detaching
+		 */
+
+ if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
+ lld_dev_put();
+ return dev->uld_dev[type];
+ }
+ }
+ lld_dev_put();
+
+ return NULL;
+}
+EXPORT_SYMBOL(hinic_get_ppf_uld_by_pdev);
+
+void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter;
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (!pdev)
+ return NULL;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ if (!pci_adapter)
+ return NULL;
+
+ chip_node = pci_adapter->chip_node;
+ lld_dev_hold();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) {
+ lld_dev_put();
+ return dev->hwdev;
+ }
+ }
+ lld_dev_put();
+
+ return NULL;
+}
+
+void hinic_get_all_chip_id(void *id_info)
+{
+ struct nic_card_id *card_id = (struct nic_card_id *)id_info;
+ struct card_node *chip_node;
+ int i = 0;
+ int id, err;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id);
+		if (err != 1) {
+			/* sscanf returns the number of converted items, so a
+			 * parse failure is err != 1, and id is unusable then
+			 */
+			pr_err("Failed to get hinic id\n");
+			continue;
+		}
+
+		card_id->id[i] = id;
+		i++;
+ }
+ lld_dev_put();
+ card_id->num = i;
+}
+
+static bool __is_func_valid(struct hinic_pcidev *dev)
+{
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ return false;
+
+ if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED)
+ return false;
+
+ if (HINIC_FUNC_IS_VF(dev->hwdev))
+ return false;
+
+ return true;
+}
+
+void hinic_get_card_info(void *hwdev, void *bufin)
+{
+ struct card_node *chip_node = NULL;
+ struct card_info *info = (struct card_info *)bufin;
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_pcidev *dev;
+ void *fun_hwdev;
+ u32 i = 0;
+
+ info->pf_num = 0;
+
+ chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+ if (!chip_node)
+ return;
+
+ lld_dev_hold();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (!__is_func_valid(dev))
+ continue;
+
+ fun_hwdev = dev->hwdev;
+
+ if (((hinic_support_fc(fun_hwdev, NULL)) ||
+ (hinic_support_fcoe(fun_hwdev, NULL))) &&
+ dev->uld_dev[SERVICE_T_FC]) {
+ info->pf[i].pf_type |= (u32)BIT(SERVICE_T_FC);
+ strlcpy(info->pf[i].name,
+ dev->uld_dev_name[SERVICE_T_FC], IFNAMSIZ);
+ }
+
+ if (hinic_support_nic(fun_hwdev, NULL)) {
+ nic_dev = dev->uld_dev[SERVICE_T_NIC];
+ if (nic_dev) {
+ info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC);
+ strlcpy(info->pf[i].name,
+ nic_dev->netdev->name, IFNAMSIZ);
+ }
+ }
+
+ if ((hinic_support_ovs(fun_hwdev, NULL)) &&
+ dev->uld_dev[SERVICE_T_OVS])
+ info->pf[i].pf_type |= (u32)BIT(SERVICE_T_OVS);
+
+ if ((hinic_support_roce(fun_hwdev, NULL)) &&
+ dev->uld_dev[SERVICE_T_ROCE])
+ info->pf[i].pf_type |= (u32)BIT(SERVICE_T_ROCE);
+
+ if ((hinic_support_toe(fun_hwdev, NULL)) &&
+ dev->uld_dev[SERVICE_T_TOE])
+ info->pf[i].pf_type |= (u32)BIT(SERVICE_T_TOE);
+
+ if (hinic_func_for_mgmt(fun_hwdev))
+ strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ);
+
+ strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev),
+ sizeof(info->pf[i].bus_info));
+ info->pf_num++;
+ i = info->pf_num;
+ }
+ lld_dev_put();
+}
+
+int hinic_get_card_func_info_by_card_name(char *chip_name,
+ struct hinic_card_func_info
+ *card_func)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic_pcidev *dev;
+ struct func_pdev_info *pdev_info;
+
+ card_func->num_pf = 0;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ))
+ continue;
+
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (hinic_func_type(dev->hwdev) == TYPE_VF)
+ continue;
+
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ pdev_info = &card_func->pdev_info[card_func->num_pf];
+ pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0);
+ pdev_info->bar0_phy_addr =
+ pci_resource_start(dev->pcidev, 0);
+
+ card_func->num_pf++;
+ if (card_func->num_pf >= MAX_SIZE)
+ break;
+ }
+ }
+
+ lld_dev_put();
+
+ return 0;
+}
+
+int hinic_get_device_id(void *hwdev, u16 *dev_id)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic_pcidev *dev;
+ u16 vendor_id = 0;
+ u16 device_id = 0;
+
+ chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+ if (!chip_node)
+ return -ENODEV;
+
+ lld_dev_hold();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+		pci_read_config_word(dev->pcidev, PCI_VENDOR_ID, &vendor_id);
+		if (vendor_id == HINIC_PCI_VENDOR_ID) {
+			pci_read_config_word(dev->pcidev, PCI_DEVICE_ID,
+					     &device_id);
+ break;
+ }
+ }
+ lld_dev_put();
+ *dev_id = device_id;
+
+ return 0;
+}
+
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic_pcidev *dev;
+
+ chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+ if (!chip_node)
+ return -ENODEV;
+
+ lld_dev_hold();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (hinic_physical_port_id(dev->hwdev) == port_id) {
+ *pf_id = hinic_global_func_id(dev->hwdev);
+ break;
+ }
+ }
+ lld_dev_put();
+
+ return 0;
+}
+
+void get_fc_devname(char *devname)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+ continue;
+
+ if (HINIC_FUNC_IS_VF(dev->hwdev))
+ continue;
+
+ if (dev->uld_dev[SERVICE_T_FC]) {
+ strlcpy(devname,
+ dev->uld_dev_name[SERVICE_T_FC],
+ IFNAMSIZ);
+ lld_dev_put();
+ return;
+ }
+ }
+ }
+ lld_dev_put();
+}
+
+enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+
+ if (dev)
+ return dev->init_state;
+
+ return HINIC_INIT_STATE_NONE;
+}
+
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname)
+{
+ struct hinic_pcidev *dev;
+
+ dev = hinic_get_pcidev_by_ifname(ifname);
+ if (dev)
+ return dev->init_state;
+
+	pr_err("Cannot get device %s\n", ifname);
+
+ return HINIC_INIT_STATE_NONE;
+}
+
+int hinic_get_self_test_result(char *ifname, u32 *result)
+{
+ struct hinic_pcidev *dev = NULL;
+
+ dev = hinic_get_pcidev_by_ifname(ifname);
+ if (!dev) {
+ pr_err("Get pcidev failed by ifname: %s\n", ifname);
+ return -EFAULT;
+ }
+
+ *result = be32_to_cpu(readl((u8 __iomem *)(dev->cfg_reg_base) +
+ SELF_TEST_BAR_ADDR_OFFSET));
+ return 0;
+}
+
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *pci_adapter;
+ struct hinic_nic_dev *nic_dev;
+
+ if (!lld_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
+ return NULL;
+
+ pci_adapter = pci_get_drvdata(lld_dev->pdev);
+ nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+ if (!nic_dev) {
+ sdk_err(&pci_adapter->pcidev->dev,
+			"There is no net device attached to the pci device\n");
+ return NULL;
+ }
+
+ return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_lld);
+
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev;
+
+	if (!netdev)
+		return NULL;
+
+	nic_dev = netdev_priv(netdev);
+	if (!nic_dev)
+		return NULL;
+
+ return nic_dev->hwdev;
+}
+EXPORT_SYMBOL(hinic_get_hwdev_by_netdev);
+
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter;
+ struct hinic_nic_dev *nic_dev;
+
+ if (!pdev)
+ return NULL;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ if (!pci_adapter || !hinic_support_nic(pci_adapter->hwdev, NULL))
+ return NULL;
+
+ nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+ if (!nic_dev) {
+ sdk_err(&pci_adapter->pcidev->dev,
+			"There is no net device attached to the pci device\n");
+ return NULL;
+ }
+
+ return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_pcidev);
+
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ return &pci_adapter->sriov_info;
+}
+
+bool hinic_is_in_host(void)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (dev->init_state > HINIC_INIT_STATE_PCI_INITED &&
+ hinic_func_type(dev->hwdev) != TYPE_VF) {
+ lld_dev_put();
+ return true;
+ }
+ }
+ }
+ lld_dev_put();
+
+ return false;
+}
+
+int hinic_attach_nic(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *dev;
+
+ if (!lld_dev)
+ return -EINVAL;
+
+ dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+ return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]);
+}
+EXPORT_SYMBOL(hinic_attach_nic);
+
+void hinic_detach_nic(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *dev;
+
+ if (!lld_dev)
+ return;
+
+ dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+ detach_uld(dev, SERVICE_T_NIC);
+}
+EXPORT_SYMBOL(hinic_detach_nic);
+
+int hinic_attach_roce(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *dev;
+
+ if (!lld_dev)
+ return -EINVAL;
+
+ dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+ return attach_uld(dev, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]);
+}
+EXPORT_SYMBOL(hinic_attach_roce);
+
+void hinic_detach_roce(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *dev;
+
+ if (!lld_dev)
+ return;
+
+ dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+ detach_uld(dev, SERVICE_T_ROCE);
+}
+EXPORT_SYMBOL(hinic_detach_roce);
+
+static int __set_nic_rss_state(struct hinic_pcidev *dev, bool enable)
+{
+ void *nic_uld;
+ int err = 0;
+
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ return 0;
+
+ nic_uld = dev->uld_dev[SERVICE_T_NIC];
+ if (!hinic_support_nic(dev->hwdev, NULL) || !nic_uld)
+ return 0;
+
+ if (hinic_func_type(dev->hwdev) == TYPE_VF)
+ return 0;
+
+ if (enable)
+ err = hinic_enable_func_rss(nic_uld);
+ else
+ err = hinic_disable_func_rss(nic_uld);
+ if (err) {
+ sdk_err(&dev->pcidev->dev, "Failed to %s rss\n",
+ enable ? "enable" : "disable");
+ }
+
+ return err;
+}
+
+int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *adapter;
+
+ if (!lld_dev)
+ return -EINVAL;
+
+ adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+
+ return __set_nic_rss_state(adapter, false);
+}
+EXPORT_SYMBOL(hinic_disable_nic_rss);
+
+int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev)
+{
+ struct hinic_pcidev *adapter;
+
+ if (!lld_dev)
+ return -EINVAL;
+
+ adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+
+ return __set_nic_rss_state(adapter, true);
+}
+EXPORT_SYMBOL(hinic_enable_nic_rss);
+
+struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *adapter;
+
+ if (!pdev)
+ return NULL;
+
+ adapter = pci_get_drvdata(pdev);
+
+ return &adapter->id;
+}
+
+static int __set_nic_func_state(struct hinic_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pcidev;
+ int err;
+ bool enable_nic;
+
+ hinic_get_func_nic_enable(pci_adapter->hwdev,
+ hinic_global_func_id(pci_adapter->hwdev),
+ &enable_nic);
+ if (enable_nic) {
+ err = attach_uld(pci_adapter, SERVICE_T_NIC,
+ &g_uld_info[SERVICE_T_NIC]);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to initialize NIC\n");
+ return err;
+ }
+
+ if (pci_adapter->init_state < HINIC_INIT_STATE_NIC_INITED)
+ pci_adapter->init_state = HINIC_INIT_STATE_NIC_INITED;
+ } else {
+ detach_uld(pci_adapter, SERVICE_T_NIC);
+ }
+
+ return 0;
+}
+
+static void slave_host_mgmt_work(struct work_struct *work)
+{
+ struct hinic_pcidev *pci_adapter =
+ container_of(work, struct hinic_pcidev, slave_nic_work);
+
+ __set_nic_func_state(pci_adapter);
+
+ /* TODO: if failed, send message to master host */
+}
+
+static void __multi_host_mgmt(struct hinic_pcidev *dev,
+ struct hinic_multi_host_mgmt_event *mhost_mgmt)
+{
+ struct hinic_pcidev *des_dev;
+	struct hinic_mhost_nic_func_state *nic_state = NULL;
+
+ switch (mhost_mgmt->sub_cmd) {
+ case HINIC_MHOST_NIC_STATE_CHANGE:
+ nic_state = mhost_mgmt->data;
+
+ nic_state->status = 0;
+
+ /* find func_idx pci_adapter and disable or enable nic */
+ lld_dev_hold();
+ list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
+ continue;
+
+ if (hinic_global_func_id(des_dev->hwdev) !=
+ nic_state->func_idx)
+ continue;
+
+ if (des_dev->init_state <
+ HINIC_INIT_STATE_DBGTOOL_INITED) {
+ nic_state->status = 1;
+ break;
+ }
+
+			sdk_info(&dev->pcidev->dev, "Receive nic state changed event, state: %d\n",
+				 nic_state->enable);
+
+			/* defer the nic state change to slave_nic_work */
+ schedule_work(&des_dev->slave_nic_work);
+
+ break;
+ }
+ lld_dev_put();
+
+ break;
+
+ default:
+		sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event %d\n",
+			 mhost_mgmt->sub_cmd);
+ break;
+ }
+}
+
+void hinic_event_process(void *adapter, struct hinic_event_info *event)
+{
+ struct hinic_pcidev *dev = adapter;
+ enum hinic_service_type type;
+
+ if (event->type == HINIC_EVENT_FMW_ACT_NTC)
+ return hinic_sync_time_to_fmw(dev);
+ else if (event->type == HINIC_EVENT_MCTP_GET_HOST_INFO)
+ return __mctp_get_host_info(dev, &event->mctp_info);
+ else if (event->type == HINIC_EVENT_MULTI_HOST_MGMT)
+ return __multi_host_mgmt(dev, &event->mhost_mgmt);
+
+ for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
+ if (test_and_set_bit(type, &dev->state)) {
+			sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't be handled, %s is detaching\n",
+				 event->type, s_uld_name[type]);
+ continue;
+ }
+
+ if (g_uld_info[type].event)
+ g_uld_info[type].event(&dev->lld_dev,
+ dev->uld_dev[type], event);
+ clear_bit(type, &dev->state);
+ }
+}
+
+static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter)
+{
+ u64 dwqe_addr;
+
+ pci_adapter->cfg_reg_base =
+ pci_ioremap_bar(pdev, HINIC_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to map interrupt regs\n");
+ goto map_intr_bar_err;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC_PCI_DB_BAR);
+ pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
+ HINIC_DB_DWQE_SIZE);
+ if (!pci_adapter->db_base) {
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to map doorbell regs\n");
+ goto map_db_err;
+ }
+
+ dwqe_addr = pci_adapter->db_base_phy + HINIC_DB_DWQE_SIZE;
+
+	/* ARM does not support ioremap_wc(), so map the direct WQE
+	 * region with device-type attributes instead
+	 */
+ pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HINIC_DB_DWQE_SIZE,
+ __pgprot(PROT_DEVICE_NGNRNE));
+ if (!pci_adapter->dwqe_mapping) {
+		sdk_err(&pci_adapter->pcidev->dev, "Failed to map dwqe regs\n");
+ goto mapping_dwqe_err;
+ }
+
+ return 0;
+
+mapping_dwqe_err:
+ iounmap(pci_adapter->db_base);
+
+map_db_err:
+ iounmap(pci_adapter->intr_reg_base);
+
+map_intr_bar_err:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void unmapping_bar(struct hinic_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->dwqe_mapping);
+
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int alloc_chip_node(struct hinic_pcidev *pci_adapter)
+{
+ struct card_node *chip_node;
+ unsigned char i;
+ unsigned char parent_bus_number = 0;
+
+ if (!pci_is_root_bus(pci_adapter->pcidev->bus))
+ parent_bus_number = pci_adapter->pcidev->bus->parent->number;
+
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ if (chip_node->dp_bus_num == parent_bus_number) {
+ pci_adapter->chip_node = chip_node;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < MAX_CARD_ID; i++) {
+ if (!FIND_BIT(card_bit_map, i)) {
+ card_bit_map = (u64)SET_BIT(card_bit_map, i);
+ break;
+ }
+ }
+
+ if (i == MAX_CARD_ID) {
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to alloc card id\n");
+ return -EFAULT;
+ }
+
+ chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
+ if (!chip_node) {
+ card_bit_map = CLEAR_BIT(card_bit_map, i);
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to alloc chip node\n");
+ return -ENOMEM;
+ }
+
+ chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL);
+ if (!(chip_node->dbgtool_attr_file.name)) {
+ kfree(chip_node);
+ card_bit_map = CLEAR_BIT(card_bit_map, i);
+ sdk_err(&pci_adapter->pcidev->dev,
+ "Failed to alloc dbgtool attr file name\n");
+ return -ENOMEM;
+ }
+
+ /* parent bus number */
+ chip_node->dp_bus_num = parent_bus_number;
+
+ snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i);
+ snprintf((char *)chip_node->dbgtool_attr_file.name,
+ IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i);
+ sdk_info(&pci_adapter->pcidev->dev,
+ "Add new chip %s to global list succeed\n",
+ chip_node->chip_name);
+
+ list_add_tail(&chip_node->node, &g_hinic_chip_list);
+
+ INIT_LIST_HEAD(&chip_node->func_list);
+ pci_adapter->chip_node = chip_node;
+
+ return 0;
+}
+
+static void free_chip_node(struct hinic_pcidev *pci_adapter)
+{
+ struct card_node *chip_node = pci_adapter->chip_node;
+ int id, err;
+
+ if (list_empty(&chip_node->func_list)) {
+ list_del(&chip_node->node);
+ sdk_info(&pci_adapter->pcidev->dev,
+ "Delete chip %s from global list succeed\n",
+ chip_node->chip_name);
+		err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id);
+		if (err == 1)
+			card_bit_map = CLEAR_BIT(card_bit_map, id);
+		else
+			sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic id\n");
+
+ kfree(chip_node->dbgtool_attr_file.name);
+ kfree(chip_node);
+ }
+}
+
+static bool hinic_get_vf_load_state(struct pci_dev *pdev)
+{
+ unsigned char parent_bus_number;
+ struct card_node *chip_node;
+ u8 id;
+
+ if (!pdev->is_virtfn)
+ return false;
+
+	/* VF used in a VM */
+ if (pci_is_root_bus(pdev->bus))
+ return disable_vf_load;
+
+ parent_bus_number = pdev->bus->parent->number;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ if (chip_node->dp_bus_num == parent_bus_number) {
+ for (id = 0; id < HINIC_MAX_PF_NUM; id++) {
+ if (chip_node->pf_bus_num[id] ==
+ pdev->bus->number) {
+ lld_dev_put();
+ return chip_node->disable_vf_load[id];
+ }
+ }
+ }
+ }
+ lld_dev_put();
+
+ return disable_vf_load;
+}
+
+static void hinic_set_vf_load_state(struct hinic_pcidev *pci_adapter)
+{
+ struct card_node *chip_node;
+ bool vf_load_state;
+ u16 func_id;
+
+ if (hinic_func_type(pci_adapter->hwdev) == TYPE_VF)
+ return;
+
+ vf_load_state = hinic_support_ovs(pci_adapter->hwdev, NULL) ?
+ true : disable_vf_load;
+
+ func_id = hinic_global_func_id(pci_adapter->hwdev);
+
+ chip_node = pci_adapter->chip_node;
+ chip_node->disable_vf_load[func_id] = vf_load_state;
+ chip_node->pf_bus_num[func_id] = pci_adapter->pcidev->bus->number;
+
+	sdk_info(&pci_adapter->pcidev->dev, "Current function supports %s, %s vf load in host\n",
+ (hinic_support_ovs(pci_adapter->hwdev, NULL) ? "ovs" : "nic"),
+ (vf_load_state ? "disable" : "enable"));
+}
+
+static int hinic_config_deft_mrss(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static int hinic_config_pci_cto(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static int hinic_pci_init(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = NULL;
+ int err;
+
+ err = hinic_config_deft_mrss(pdev);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to configure Max Read Request Size\n");
+ return err;
+ }
+
+ err = hinic_config_pci_cto(pdev);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to configure Completion timeout\n");
+ return err;
+ }
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter) {
+ sdk_err(&pdev->dev,
+ "Failed to alloc pci device adapter\n");
+ return -ENOMEM;
+ }
+ pci_adapter->pcidev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+#ifdef CONFIG_PCI_IOV
+ if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
+		sdk_info(&pdev->dev, "VFs are not bound to hinic\n");
+ return 0;
+ }
+#endif
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto pci_enable_err;
+ }
+
+ err = pci_request_regions(pdev, HINIC_DRV_NAME);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to request regions\n");
+ goto pci_regions_err;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto dma_mask_err;
+ }
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ sdk_warn(&pdev->dev,
+ "Couldn't set 64-bit coherent DMA mask\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ sdk_err(&pdev->dev,
+ "Failed to set coherent DMA mask\n");
+			goto dma_consistent_mask_err;
+ }
+ }
+
+ return 0;
+
+dma_consistent_mask_err:
+dma_mask_err:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+pci_regions_err:
+ pci_disable_device(pdev);
+
+pci_enable_err:
+ pci_set_drvdata(pdev, NULL);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic_pci_deinit(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ kfree(pci_adapter);
+}
+
+static void hinic_notify_ppf_unreg(struct hinic_pcidev *pci_adapter)
+{
+ struct card_node *chip_node = pci_adapter->chip_node;
+ struct hinic_pcidev *dev;
+
+ if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+ return;
+
+ lld_lock_chip_node();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ hinic_ppf_hwdev_unreg(dev->hwdev);
+ }
+ lld_unlock_chip_node();
+}
+
+static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter)
+{
+ struct card_node *chip_node = pci_adapter->chip_node;
+ struct hinic_pcidev *dev;
+
+ if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+ return;
+
+ lld_lock_chip_node();
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ hinic_ppf_hwdev_reg(dev->hwdev, pci_adapter->hwdev);
+ }
+ lld_unlock_chip_node();
+}
+
+static int hinic_func_init(struct pci_dev *pdev,
+ struct hinic_pcidev *pci_adapter)
+{
+ struct hinic_init_para init_para;
+ int err;
+
+ init_para.adapter_hdl = pci_adapter;
+ init_para.pcidev_hdl = pdev;
+ init_para.dev_hdl = &pdev->dev;
+ init_para.cfg_reg_base = pci_adapter->cfg_reg_base;
+ init_para.intr_reg_base = pci_adapter->intr_reg_base;
+ init_para.db_base = pci_adapter->db_base;
+ init_para.db_base_phy = pci_adapter->db_base_phy;
+ init_para.dwqe_mapping = pci_adapter->dwqe_mapping;
+ init_para.hwdev = &pci_adapter->hwdev;
+ init_para.chip_node = pci_adapter->chip_node;
+ init_para.ppf_hwdev = hinic_get_ppf_hwdev_by_pdev(pdev);
+ err = hinic_init_hwdev(&init_para);
+ if (err < 0) {
+ pci_adapter->hwdev = NULL;
+ sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return -EFAULT;
+ } else if (err > 0) {
+		sdk_err(&pdev->dev, "Hardware device initialization partially failed\n");
+ hinic_detect_version_compatible(pci_adapter);
+ hinic_notify_ppf_reg(pci_adapter);
+ pci_adapter->init_state = HINIC_INIT_STATE_HW_PART_INITED;
+ return -EFAULT;
+ }
+
+ hinic_notify_ppf_reg(pci_adapter);
+ pci_adapter->init_state = HINIC_INIT_STATE_HWDEV_INITED;
+
+ hinic_set_vf_load_state(pci_adapter);
+
+ pci_adapter->lld_dev.pdev = pdev;
+ pci_adapter->lld_dev.hwdev = pci_adapter->hwdev;
+ pci_adapter->sriov_info.pdev = pdev;
+ pci_adapter->sriov_info.hwdev = pci_adapter->hwdev;
+
+ hinic_event_register(pci_adapter->hwdev, pci_adapter,
+ hinic_event_process);
+
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+ hinic_sync_time_to_fmw(pci_adapter);
+
+	/* dbgtool init */
+ lld_lock_chip_node();
+ err = dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node);
+ if (err) {
+ lld_unlock_chip_node();
+ sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
+ hinic_event_unregister(pci_adapter->hwdev);
+ return err;
+ }
+ lld_unlock_chip_node();
+
+ pci_adapter->init_state = HINIC_INIT_STATE_DBGTOOL_INITED;
+
+ err = hinic_detect_version_compatible(pci_adapter);
+ if (err)
+ return err;
+
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev) &&
+ FUNC_ENABLE_SRIOV_IN_DEFAULT(pci_adapter->hwdev)) {
+ hinic_pci_sriov_enable(pdev,
+ hinic_func_max_vf(pci_adapter->hwdev));
+ }
+
+ /* NIC is base driver, probe firstly */
+ err = __set_nic_func_state(pci_adapter);
+ if (err)
+ return err;
+
+#ifndef __HIFC_PANGEA__
+ attach_ulds(pci_adapter);
+#else
+ if (g_uld_info[SERVICE_T_FC].probe) {
+ err = attach_uld(pci_adapter, SERVICE_T_FC,
+ &g_uld_info[SERVICE_T_FC]);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to initialize FC\n");
+ return err;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void hinic_func_deinit(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic_set_func_deinit_flag(pci_adapter->hwdev);
+ hinic_flush_mgmt_workq(pci_adapter->hwdev);
+
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) {
+ detach_ulds(pci_adapter);
+ detach_uld(pci_adapter, SERVICE_T_NIC);
+ }
+
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_DBGTOOL_INITED) {
+ lld_lock_chip_node();
+ dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node);
+ lld_unlock_chip_node();
+ hinic_event_unregister(pci_adapter->hwdev);
+ }
+
+ hinic_notify_ppf_unreg(pci_adapter);
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ hinic_free_hwdev(pci_adapter->hwdev);
+}
+
+#ifdef CONFIG_X86
+/**
+ * cfg_order_reg - on Haswell and Broadwell CPUs, configure the DMA order
+ * register to zero
+ * @pci_adapter: pci_adapter
+ **/
+/*lint -save -e40 */
+void cfg_order_reg(struct hinic_pcidev *pci_adapter)
+{
+ u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56};
+ struct cpuinfo_x86 *cpuinfo;
+ u32 i;
+
+ if (HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+ return;
+
+ cpuinfo = &cpu_data(0);
+ for (i = 0; i < sizeof(cpu_model); i++) {
+ if (cpu_model[i] == cpuinfo->x86_model)
+ hinic_set_pcie_order_cfg(pci_adapter->hwdev);
+ }
+}
+
+/*lint -restore*/
+#endif
+
+static void wait_tool_unused(void)
+{
+ u32 loop_cnt = 0;
+
+ while (loop_cnt < HINIC_WAIT_TOOL_CNT_TIMEOUT) {
+ if (!atomic_read(&tool_used_cnt))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
+static inline void wait_sriov_cfg_complete(struct hinic_pcidev *pci_adapter)
+{
+ struct hinic_sriov_info *sriov_info;
+ u32 loop_cnt = 0;
+
+ sriov_info = &pci_adapter->sriov_info;
+
+ set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+ usleep_range(9900, 10000);
+
+ while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
+ if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+ !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
+static void hinic_remove(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ if (!pci_adapter)
+ return;
+
+ sdk_info(&pdev->dev, "Pcie device remove begin\n");
+
+ if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
+ pci_set_drvdata(pdev, NULL);
+ kfree(pci_adapter);
+ return;
+ }
+
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ hinic_detect_hw_present(pci_adapter->hwdev);
+
+ cancel_work_sync(&pci_adapter->slave_nic_work);
+
+ switch (pci_adapter->init_state) {
+ case HINIC_INIT_STATE_ALL_INITED:
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+ sysfs_remove_group(&pdev->dev.kobj, &hinic_attr_group);
+ /*lint -fallthrough*/
+ case HINIC_INIT_STATE_NIC_INITED:
+		/* Hotplug is not supported while SR-IOV is enabled, so
+		 * disable the SR-IOV capability as in the normal path.
+		 */
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
+ wait_sriov_cfg_complete(pci_adapter);
+ hinic_pci_sriov_disable(pdev);
+ }
+ /*lint -fallthrough*/
+ case HINIC_INIT_STATE_DBGTOOL_INITED:
+ case HINIC_INIT_STATE_HW_PART_INITED:
+ case HINIC_INIT_STATE_HWDEV_INITED:
+ case HINIC_INIT_STATE_PCI_INITED:
+ set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag);
+ wait_tool_unused();
+
+ if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+ hinic_func_deinit(pdev);
+
+ lld_lock_chip_node();
+ list_del(&pci_adapter->node);
+ nictool_k_uninit();
+ free_chip_node(pci_adapter);
+ lld_unlock_chip_node();
+ unmapping_bar(pci_adapter);
+ hinic_pci_deinit(pdev);
+
+ /*lint -fallthrough*/
+ break;
+
+ default:
+ break;
+ }
+
+ sdk_info(&pdev->dev, "Pcie device removed\n");
+}
+
+static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic_pcidev *pci_adapter;
+ int err;
+
+ sdk_info(&pdev->dev, "Pcie device probe begin\n");
+
+ err = hinic_pci_init(pdev);
+ if (err)
+ return err;
+
+#ifdef CONFIG_PCI_IOV
+ if (pdev->is_virtfn && hinic_get_vf_load_state(pdev))
+ return 0;
+#endif
+
+ pci_adapter = pci_get_drvdata(pdev);
+
+ err = mapping_bar(pdev, pci_adapter);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to map bar\n");
+ goto map_bar_failed;
+ }
+
+ pci_adapter->id = *id;
+ INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work);
+
+	/* if a chip node already exists for this pcie function,
+	 * add the function to that chip
+	 */
+ lld_lock_chip_node();
+ err = alloc_chip_node(pci_adapter);
+ if (err) {
+ sdk_err(&pdev->dev,
+ "Failed to add new chip node to global list\n");
+ goto alloc_chip_node_fail;
+ }
+
+ err = nictool_k_init();
+ if (err) {
+		sdk_warn(&pdev->dev, "Failed to init nictool\n");
+ goto init_nictool_err;
+ }
+
+ list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list);
+
+ lld_unlock_chip_node();
+
+ pci_adapter->init_state = HINIC_INIT_STATE_PCI_INITED;
+
+ err = hinic_func_init(pdev, pci_adapter);
+ if (err)
+ goto func_init_err;
+
+ if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
+ err = sysfs_create_group(&pdev->dev.kobj, &hinic_attr_group);
+ if (err) {
+ sdk_err(&pdev->dev, "Failed to create sysfs group\n");
+ goto sysfs_create_err;
+ }
+ }
+
+#ifdef CONFIG_X86
+ cfg_order_reg(pci_adapter);
+#endif
+
+ sdk_info(&pdev->dev, "Pcie device probed\n");
+ pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED;
+
+ return 0;
+
+sysfs_create_err:
+func_init_err:
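+	/* keep the partially initialized device registered; hinic_remove()
+	 * will clean up according to pci_adapter->init_state
+	 */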
+ return 0;
+
+init_nictool_err:
+ free_chip_node(pci_adapter);
+
+alloc_chip_node_fail:
+ lld_unlock_chip_node();
+ unmapping_bar(pci_adapter);
+
+map_bar_failed:
+ hinic_pci_deinit(pdev);
+
+ sdk_err(&pdev->dev, "Pcie device probe failed\n");
+ return err;
+}
+
+/*lint -save -e133 -e10*/
+static const struct pci_device_id hinic_pci_table[] = {
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PF), HINIC_BOARD_25GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF_HV), 0},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_SMTIO), HINIC_BOARD_PG_SM_25GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_100GE),
+ HINIC_BOARD_PG_100GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_TP_10GE),
+ HINIC_BOARD_PG_TP_10GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_40GE), HINIC_BOARD_40GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_100GE), HINIC_BOARD_100GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_25GE), HINIC_BOARD_25GE},
+ {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_MULTI_HOST), HINIC_BOARD_25GE},
+ {0, 0}
+};
+
+/*lint -restore*/
+MODULE_DEVICE_TABLE(pci, hinic_pci_table);
+
+/**
+ * hinic_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ *
+ * Since we only need error detection, not error handling, we always
+ * return PCI_ERS_RESULT_CAN_RECOVER to tell the AER driver that we
+ * don't need a reset (error handling).
+ */
+static pci_ers_result_t hinic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hinic_pcidev *pci_adapter;
+
+ sdk_err(&pdev->dev,
+ "Uncorrectable error detected, log and cleanup error status: 0x%08x\n",
+ state);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ pci_adapter = pci_get_drvdata(pdev);
+
+ if (pci_adapter)
+ hinic_record_pcie_error(pci_adapter->hwdev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void hinic_shutdown(struct pci_dev *pdev)
+{
+ struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ sdk_err(&pdev->dev, "Shutdown device\n");
+
+ if (pci_adapter)
+ hinic_shutdown_hwdev(pci_adapter->hwdev);
+}
+
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+static struct pci_driver_rh hinic_driver_rh = {
+ .sriov_configure = hinic_pci_sriov_configure,
+};
+#endif
+
+/* Because we only need error detection, not error handling, the
+ * error_detected callback alone is enough.
+ */
+static struct pci_error_handlers hinic_err_handler = {
+ .error_detected = hinic_io_error_detected,
+};
+
+static struct pci_driver hinic_driver = {
+ .name = HINIC_DRV_NAME,
+ .id_table = hinic_pci_table,
+ .probe = hinic_probe,
+ .remove = hinic_remove,
+ .shutdown = hinic_shutdown,
+
+#if defined(HAVE_SRIOV_CONFIGURE)
+ .sriov_configure = hinic_pci_sriov_configure,
+#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+ .rh_reserved = &hinic_driver_rh,
+#endif
+
+ .err_handler = &hinic_err_handler
+};
+
+static int __init hinic_lld_init(void)
+{
+ pr_info("%s - version %s\n", HINIC_DRV_DESC, HINIC_DRV_VERSION);
+ memset(g_uld_info, 0, sizeof(g_uld_info));
+ atomic_set(&tool_used_cnt, 0);
+
+ hinic_lld_lock_init();
+
+	/* Register the NIC driver information first; the net device is
+	 * added in nic_probe, which is called from hinic_probe.
+	 */
+ hinic_register_uld(SERVICE_T_NIC, &nic_uld_info);
+
+ return pci_register_driver(&hinic_driver);
+}
+
+static void __exit hinic_lld_exit(void)
+{
+ pci_unregister_driver(&hinic_driver);
+
+ hinic_unregister_uld(SERVICE_T_NIC);
+}
+module_init(hinic_lld_init);
+module_exit(hinic_lld_exit);
+int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (!micro_log_info || !micro_log_info->init ||
+ !micro_log_info->deinit) {
+ pr_err("Invalid information of micro log info to register\n");
+ return -EINVAL;
+ }
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
+ if (micro_log_info->init(dev->hwdev)) {
+ sdk_err(&dev->pcidev->dev,
+ "micro log init failed\n");
+ continue;
+ }
+ }
+ }
+ }
+ lld_dev_put();
+ pr_info("Register micro log succeed\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_register_micro_log);
+
+void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info)
+{
+ struct card_node *chip_node;
+ struct hinic_pcidev *dev;
+
+ if (!micro_log_info)
+ return;
+
+ lld_dev_hold();
+ list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+ continue;
+
+ if (hinic_func_type(dev->hwdev) == TYPE_PPF)
+ micro_log_info->deinit(dev->hwdev);
+ }
+ }
+ lld_dev_put();
+ pr_info("Unregister micro log succeed\n");
+}
+EXPORT_SYMBOL(hinic_unregister_micro_log);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.h b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
new file mode 100644
index 000000000000..da2ad9feb794
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_LLD_H_
+#define HINIC_LLD_H_
+
+struct hinic_lld_dev {
+ struct pci_dev *pdev;
+ void *hwdev;
+};
+
+enum hinic_init_state {
+ HINIC_INIT_STATE_NONE,
+ HINIC_INIT_STATE_PCI_INITED,
+ HINIC_INIT_STATE_HW_PART_INITED,
+ HINIC_INIT_STATE_HWDEV_INITED,
+ HINIC_INIT_STATE_DBGTOOL_INITED,
+ HINIC_INIT_STATE_NIC_INITED,
+ HINIC_INIT_STATE_ALL_INITED,
+};
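+
+/* Illustrative only (not part of this header): callers compare the recorded
+ * progress against the stage they require before touching resources that
+ * the stage guarantees, e.g.
+ *
+ *	if (hinic_get_init_state(pdev) >= HINIC_INIT_STATE_NIC_INITED)
+ *		... the NIC ULD has been attached for this function ...
+ */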
+
+struct hinic_uld_info {
+	/* uld_dev: must not return NULL even if the function capability
+	 * does not support the upper layer driver.
+	 * uld_dev_name: the NIC driver should copy the net device name,
+	 * the FC driver may copy the fc device name,
+	 * other upper layer drivers need not copy anything.
+	 */
+ int (*probe)(struct hinic_lld_dev *lld_dev,
+ void **uld_dev, char *uld_dev_name);
+ void (*remove)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+ int (*suspend)(struct hinic_lld_dev *lld_dev,
+ void *uld_dev, pm_message_t state);
+ int (*resume)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+ void (*event)(struct hinic_lld_dev *lld_dev, void *uld_dev,
+ struct hinic_event_info *event);
+ int (*ioctl)(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+};
+
+/* Used by upper layer drivers to register with the HiNIC PCIe driver;
+ * the original interface was service_register_interface
+ */
+int hinic_register_uld(enum hinic_service_type uld_type,
+ struct hinic_uld_info *uld_info);
+
+/* Used by upper layer drivers to unregister from the HiNIC PCIe driver;
+ * the original interface was service_unregister_interface
+ */
+void hinic_unregister_uld(enum hinic_service_type uld_type);
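+
+/* A minimal registration sketch (illustrative only; my_probe/my_remove are
+ * hypothetical ULD callbacks, everything else comes from this header):
+ *
+ *	static struct hinic_uld_info my_uld_info = {
+ *		.probe  = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	hinic_register_uld(SERVICE_T_TOE, &my_uld_info);
+ *	...
+ *	hinic_unregister_uld(SERVICE_T_TOE);
+ */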
+
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+ enum hinic_service_type type);
+
+/* used for TOE/IWARP */
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev);
+/* used for TOE/IWARP */
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev);
+
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev);
+void *hinic_get_hwdev_by_ifname(char *ifname);
+int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname);
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type);
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type);
+
+int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]);
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up);
+int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted,
+ u8 *cos_up);
+void hinic_get_all_chip_id(void *card_id);
+void hinic_get_card_info(void *hwdev, void *bufin);
+int hinic_get_device_id(void *hwdev, u16 *dev_id);
+void get_fc_devname(char *devname);
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id);
+
+void hinic_tool_cnt_inc(void);
+void hinic_tool_cnt_dec(void);
+
+struct hinic_sriov_info;
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev);
+
+/* for dpdk */
+void *hinic_get_pci_dev(u16 bdf);
+void hinic_dpdk_pcie_remove(void *pdev);
+int hinic_dpdk_pcie_probe(void *pdev);
+
+int hinic_attach_nic(struct hinic_lld_dev *lld_dev);
+void hinic_detach_nic(struct hinic_lld_dev *lld_dev);
+
+int hinic_attach_roce(struct hinic_lld_dev *lld_dev);
+void hinic_detach_roce(struct hinic_lld_dev *lld_dev);
+
+int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev);
+int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev);
+
+int hinic_get_self_test_result(char *ifname, u32 *result);
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname);
+enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev);
+
+extern struct hinic_uld_info g_uld_info[SERVICE_T_MAX];
+
+struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev);
+bool hinic_is_in_host(void);
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
new file mode 100644
index 000000000000..d397bb620030
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
@@ -0,0 +1,1605 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO(strong order, relax order) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_NOT_DONE = 1,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+/* specifies the issue state of the Tx request for the message data.
+ * 0 - Tx request is done;
+ * 1 - Tx request is in progress.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
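+
+/* Illustrative sketch only (mbox_ctrl_addr is a hypothetical register
+ * pointer): before issuing the next segment, the sender polls the control
+ * word until hardware clears TX_STATUS, e.g.
+ *
+ *	u32 ctrl = be32_to_cpu(readl(mbox_ctrl_addr));
+ *	bool busy = ((ctrl >> HINIC_MBOX_CTRL_TX_STATUS_SHIFT) &
+ *		     HINIC_MBOX_CTRL_TX_STATUS_MASK) == TX_NOT_DONE;
+ */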
+
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+/* specifies the mailbox message direction
+ * 0 - send
+ * 1 - receive
+ */
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define MBOX_SEGLEN_MASK \
+ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
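+
+/* Illustrative only: a direct-send header for a 12-byte message carried in
+ * a single segment could be composed, and later decoded, as
+ *
+ *	u64 header = HINIC_MBOX_HEADER_SET(12, MSG_LEN) |
+ *		     HINIC_MBOX_HEADER_SET(12, SEG_LEN) |
+ *		     HINIC_MBOX_HEADER_SET(1, LAST) |
+ *		     HINIC_MBOX_HEADER_SET(HINIC_HWIF_DIRECT_SEND, DIRECTION);
+ *	u16 msg_len = HINIC_MBOX_HEADER_GET(header, MSG_LEN);
+ */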
+
+#define HINIC_MBOX_SEG_LEN 48
+#define HINIC_MBOX_COMP_TIME 8000U
+#define MBOX_MSG_POLLING_TIMEOUT 8000
+
+#define HINIC_MBOX_DATA_SIZE 2040
+
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+#define MBOX_INFO_SZ 4
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+
+/* mbox write back status is 16B, only first 4B is used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
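+
+/* Illustrative decode only: after hardware writes back a status word "wb",
+ * the sender checks completion and extracts any error code:
+ *
+ *	if (MBOX_STATUS_FINISHED(wb) && !MBOX_STATUS_SUCCESS(wb))
+ *		errcode = MBOX_STATUS_ERRCODE(wb);
+ */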
+
+#define SEQ_ID_START_VAL 0
+#define SEQ_ID_MAX_VAL 42
+
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define HINIC_MGMT_RSP_AEQN 0
+#define HINIC_MBOX_RSP_AEQN 2
+#define HINIC_MBOX_RECV_AEQN 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+
+#define MBOX_RESPONSE_ERROR 0x1
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+#define MBOX_MSG_ID_INC(func_to_func) (MBOX_MSG_ID(func_to_func) = \
+ (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK)
+
+#define FUNC_ID_OFF_SET_8B 8
+#define FUNC_ID_OFF_SET_10B 10
+
+enum hinic_hwif_direction_type {
+ HINIC_HWIF_DIRECT_SEND = 0,
+ HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_INT,
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+ WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+struct hinic_set_random_id {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 vf_in_pf;
+ u8 rsvd1;
+ u16 func_idx;
+ u32 random_id;
+};
+
+static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ void *buf_in, u16 in_size, u16 offset)
+{
+ u16 func_idx;
+
+ if (in_size < offset + sizeof(func_idx)) {
+ sdk_warn(hwdev->dev_hdl,
+			 "Received mailbox msg len: %d is too short\n",
+ in_size);
+ return false;
+ }
+
+ func_idx = *((u16 *)((u8 *)buf_in + offset));
+
+ if (src_func_idx != func_idx) {
+ sdk_warn(hwdev->dev_hdl,
+			 "Received mailbox function id (0x%x) does not match msg function id (0x%x)\n",
+ src_func_idx, func_idx);
+ return false;
+ }
+
+ return true;
+}
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ return check_func_id(hwdev, func_idx, buf_in, in_size,
+ FUNC_ID_OFF_SET_8B);
+}
+
+bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ return check_func_id(hwdev, func_idx, buf_in, in_size,
+ FUNC_ID_OFF_SET_10B);
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info);
+
+/**
+ * hinic_register_ppf_mbox_cb - register mbox callback for ppf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_ppf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->ppf_mbox_cb[mod] = callback;
+
+ return 0;
+}
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_mbox_cb[mod] = callback;
+
+ return 0;
+}
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->vf_mbox_cb[mod] = callback;
+
+ return 0;
+}
+
+/**
+ * hinic_register_ppf_to_pf_mbox_cb - register mbox callback for pf to
+ * receive messages from ppf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_recv_from_ppf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_recv_from_ppf_mbox_cb[mod] = callback;
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_ppf_mbox_cb - unregister the mbox callback for ppf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback handled
+ */
+void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ func_to_func->ppf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback handled
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback handled
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf
+ * receiving messages from ppf
+ * @hwdev: pointer to hw device
+ * @mod: specific mod that the callback handled
+ */
+void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ func_to_func->pf_recv_from_ppf_mbox_cb[mod] = NULL;
+}
+
+static int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_mbox_func_to_func *func_to_func = handle;
+
+	sdk_warn(func_to_func->hwdev->dev_hdl, "VF commands are not supported yet\n");
+ return -EFAULT;
+}
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ if (!func_to_func->vf_mbox_cb[recv_mbox->mod]) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n");
+ return -EINVAL;
+ }
+
+ return func_to_func->vf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
+ recv_mbox->cmd,
+ recv_mbox->mbox,
+ recv_mbox->mbox_len,
+ buf_out, out_size);
+}
+
+static int
+recv_pf_from_ppf_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_pf_recv_from_ppf_mbox_cb mbox_callback;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ mbox_callback = func_to_func->pf_recv_from_ppf_mbox_cb[recv_mbox->mod];
+ if (!mbox_callback) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n");
+ return -EINVAL;
+ }
+
+ return mbox_callback(func_to_func->hwdev, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len, buf_out,
+ out_size);
+}
+
+static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u8 pf_id, void *buf_out, u16 *out_size)
+{
+ u16 vf_id = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Received illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ if (!func_to_func->ppf_mbox_cb[recv_mbox->mod]) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %d\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ return func_to_func->ppf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
+ pf_id, vf_id,
+ recv_mbox->cmd,
+ recv_mbox->mbox,
+ recv_mbox->mbox_len,
+ buf_out, out_size);
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *buf_out,
+ u16 *out_size)
+{
+ u16 vf_id = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ if (!func_to_func->pf_mbox_cb[recv_mbox->mod]) {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n",
+			 recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ vf_id = src_func_idx - hinic_glb_pf_vf_offset(func_to_func->hwdev);
+ return func_to_func->pf_mbox_cb[recv_mbox->mod](func_to_func->hwdev,
+ vf_id, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+}
+
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ u8 size)
+{
+ u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (cmd == cmd_handle[i].cmd) {
+ if (cmd_handle[i].check_cmd)
+ return cmd_handle[i].check_cmd(hwdev, src_idx,
+ buf_in, in_size);
+ else
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ struct hinic_hwdev *dev = func_to_func->hwdev;
+ struct mbox_msg_info msg_info = {0};
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ void *buf_out = recv_mbox->buf_out;
+ int err = 0;
+
+ if (HINIC_IS_VF(dev)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else { /* pf/ppf process */
+
+ if (IS_PF_OR_PPF_SRC(src_func_idx)) {
+ if (HINIC_IS_PPF(dev)) {
+ err = recv_ppf_mbox_handler(func_to_func,
+ recv_mbox,
+ (u8)src_func_idx,
+ buf_out, &out_size);
+ if (err)
+ goto out;
+ } else {
+ err = recv_pf_from_ppf_handler(func_to_func,
+ recv_mbox,
+ buf_out,
+ &out_size);
+ if (err)
+ goto out;
+ }
+		} else {
+			/* the source is neither PF nor PPF, so it is from a VF */
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+ }
+
+out:
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_DEV_BUSY_ACTIVE_FW ||
+ err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+		/* if there is no data to respond with, set out_size to the no-data length */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ } else {
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+ }
+}
+
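+/*
+ * A mailbox message larger than one segment arrives as a train of
+ * MBOX_SEG_LEN-byte AEQ events whose headers carry an increasing sequence
+ * id. The check below accepts a segment only when it either restarts the
+ * train (seq_id == 0) or directly follows the last accepted segment;
+ * anything else resets the reassembly state so a corrupted train is
+ * dropped instead of being spliced into the receive buffer.
+ */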
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
+ return false;
+
+	if (seq_id == 0) {
+		recv_mbox->seq_id = seq_id;
+	} else {
+		if (seq_id != recv_mbox->seq_id + 1) {
+			recv_mbox->seq_id = 0;
+			return false;
+		}
+		recv_mbox->seq_id = seq_id;
+	}
+
+ return true;
+}
+
+static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START)
+ complete(&recv_mbox->recv_done);
+ else
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
+			func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+			recv_mbox->msg_info.status);
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+
+ recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+ mbox_work->src_func_idx);
+
+ destroy_work(&mbox_work->work);
+
+ kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox)
+{
+ u64 mbox_header = *((u64 *)header);
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+	struct hinic_recv_mbox *async_rcv_mbox = NULL;
+ u16 src_func_idx;
+ struct hinic_mbox_work *mbox_work;
+ int pos;
+ u8 seq_id, seg_len;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+		return;
+	}
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+ HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+
+ if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_RESPONSE) {
+ resp_mbox_handler(func_to_func, recv_mbox);
+ return;
+ }
+
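+	/* recv_mbox points at the per-source static slot that the AEQ handler
+	 * reuses for every message from this function. For MBOX_NO_ACK
+	 * messages the work item runs asynchronously and nothing stops the
+	 * next message from overwriting the slot, so a private deep copy
+	 * (header state, message buffer and out buffer) is made for the work
+	 * item to own and free.
+	 */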
+	if (recv_mbox->ack_type == MBOX_NO_ACK) {
+		async_rcv_mbox = kzalloc(sizeof(*async_rcv_mbox), GFP_KERNEL);
+		if (!async_rcv_mbox) {
+			sdk_err(func_to_func->hwdev->dev_hdl, "Allocate asynchronous receive mbox memory failed\n");
+			return;
+		}
+		memcpy(async_rcv_mbox, recv_mbox, sizeof(*async_rcv_mbox));
+
+		async_rcv_mbox->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+		if (!async_rcv_mbox->mbox) {
+			sdk_err(func_to_func->hwdev->dev_hdl, "Allocate asynchronous receive mbox message memory failed\n");
+			goto async_rcv_mbox_msg_err;
+		}
+		memcpy(async_rcv_mbox->mbox, recv_mbox->mbox, MBOX_MAX_BUF_SZ);
+
+		async_rcv_mbox->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+		if (!async_rcv_mbox->buf_out) {
+			sdk_err(func_to_func->hwdev->dev_hdl, "Allocate asynchronous receive mbox out buffer memory failed\n");
+			goto async_rcv_mbox_buf_err;
+		}
+	}
+
+	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+	if (!mbox_work) {
+		sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed\n");
+		if (recv_mbox->ack_type == MBOX_NO_ACK)
+			goto mbox_work_err;
+		else
+			return;
+	}
+
+ mbox_work->func_to_func = func_to_func;
+
+ if (recv_mbox->ack_type == MBOX_NO_ACK)
+		mbox_work->recv_mbox = async_rcv_mbox;
+ else
+ mbox_work->recv_mbox = recv_mbox;
+
+ mbox_work->src_func_idx = src_func_idx;
+ INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return;
+
+mbox_work_err:
+	kfree(async_rcv_mbox->buf_out);
+
+async_rcv_mbox_buf_err:
+	kfree(async_rcv_mbox->mbox);
+
+async_rcv_mbox_msg_err:
+	kfree(async_rcv_mbox);
+}
+
+int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ struct hinic_set_random_id rand_info = {0};
+ u16 out_size = sizeof(rand_info);
+ int ret;
+
+ rand_info.version = HINIC_CMD_VER_FUNC_ID;
+ rand_info.func_idx = func_id;
+ rand_info.vf_in_pf = (u8)(func_id - hinic_glb_pf_vf_offset(hwdev));
+ get_random_bytes(&rand_info.random_id, sizeof(u32));
+
+ func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;
+
+ ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
+ &rand_info, sizeof(rand_info),
+ &rand_info, &out_size, 0);
+ if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rand_info.status) || !out_size || ret) {
+ sdk_err(hwdev->dev_hdl, "Failed to set vf random id, err: %d, status: 0x%x, out
size: 0x%x\n",
+ ret, rand_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+ return rand_info.status;
+
+ func_to_func->vf_mbx_old_rand_id[func_id] =
+ func_to_func->vf_mbx_rand_id[func_id];
+
+ return 0;
+}
+
+static void update_random_id_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_mbox_func_to_func *func_to_func = mbox_work->func_to_func;
+ u16 src = mbox_work->src_func_idx;
+ int err;
+
+ err = set_vf_mbox_random_id(func_to_func->hwdev, src);
+	if (err)
+		sdk_warn(func_to_func->hwdev->dev_hdl, "Update vf id(0x%x) random id failed\n",
+			 mbox_work->src_func_idx);
+
+ destroy_work(&mbox_work->work);
+
+ kfree(mbox_work);
+}
+
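+/*
+ * VF random id check: the PF programs a per-VF random value into the
+ * management CPU (set_vf_mbox_random_id) and every VF-originated mailbox
+ * carries that value right after its header. A message whose value matches
+ * neither the current nor the previous programmed id is treated as forged
+ * or stale: it is dropped and a work item is queued to roll the id for
+ * that VF.
+ */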
+bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
+ u8 *header)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_mbox_work *mbox_work;
+ u32 random_id;
+ u16 offset, src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+ int vf_in_pf;
+
+ if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
+ return true;
+
+ if (!HINIC_IS_PPF(hwdev)) {
+ offset = hinic_glb_pf_vf_offset(hwdev);
+ vf_in_pf = src - offset;
+
+ if (vf_in_pf < 1 || vf_in_pf > hinic_func_max_vf(hwdev)) {
+ sdk_warn(hwdev->dev_hdl,
+ "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
+ src, (offset + 1),
+ (hinic_func_max_vf(hwdev) + offset));
+ return false;
+ }
+ }
+
+ random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN +
+ MBOX_HEADER_SZ));
+
+ if (random_id == func_to_func->vf_mbx_rand_id[src] ||
+ random_id == func_to_func->vf_mbx_old_rand_id[src])
+ return true;
+
+	sdk_warn(hwdev->dev_hdl,
+		 "Receive func_id(0x%x) mailbox random id(0x%x) mismatch with pf reserved id(0x%x)\n",
+		 src, random_id, func_to_func->vf_mbx_rand_id[src]);
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work) {
+ sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory
failed.\n");
+ return false;
+ }
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->src_func_idx = src;
+
+ INIT_WORK(&mbox_work->work, update_random_id_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return false;
+}
+
+void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+ u64 mbox_header = *((u64 *)header);
+ u64 src, dir;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+
+ dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Mailbox source function id:%u is invalid\n", (u32)src);
+ return;
+ }
+
+ if (!check_vf_mbox_random_id(func_to_func, header))
+ return;
+
+ recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
+}
+
+void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_send_mbox *send_mbox;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+ send_mbox = &func_to_func->send_mbox;
+
+ complete(&send_mbox->send_done);
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ *mbox->wb_status = 0;
+
+ /* clear mailbox write back status */
+ wmb();
+}
+
+static void mbox_copy_header(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 *data = (u32 *)header;
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, void *seg,
+ u16 seg_len)
+{
+ u32 *data = seg;
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 i, idx_max;
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++) {
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+ }
+}
+
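+/*
+ * The mailbox send window is programmed through two CSRs: the INT register
+ * selects the destination function/AEQ and describes the DMA of the
+ * segment (TX_SIZE is given in 4-byte units, hence the ">> 2" below), and
+ * the CTRL register arms the transfer, optionally suppressing the AEQE so
+ * the sender can poll the write-back status instead.
+ */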
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn, u16 seg_len,
+ int poll)
+{
+ u32 mbox_int, mbox_ctrl;
+	u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
+ MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ wmb(); /* writing the mbox int attributes */
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+void dump_mox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val);
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+	/* write back is 16B, but only the first 4B are used */
+	u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+	rmb(); /* ensure the status is read before it is checked */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len,
+ int poll, void *msg_info)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn, wb_status = 0, errcode;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct completion *done = &send_mbox->send_done;
+ ulong jif;
+ u32 cnt = 0;
+
+ if (num_aeqs >= 4)
+ dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
+
+ if (!poll)
+ init_completion(done);
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(hwdev, send_mbox, &header);
+
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+
+ wmb(); /* writing the mbox msg attributes */
+
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status:
0x%x\n",
+ wb_status);
+ dump_mox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif)) {
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout\n");
+ dump_mox_reg(hwdev);
+ destroy_completion(done);
+ return -ETIMEDOUT;
+ }
+ destroy_completion(done);
+
+ wb_status = get_mbox_status(send_mbox);
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %d error, wb status:
0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
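+/*
+ * send_mbox_to_func splits a message into MBOX_SEG_LEN chunks. The header
+ * is rebuilt for every chunk: SEG_LEN shrinks on the final chunk, LAST is
+ * set on it, and SEQID increments so the receiver can reassemble the
+ * segments in order (see check_mbox_seq_id_and_seg_len on the receive
+ * side).
+ */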
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ int err = 0;
+ int seq_id = 0;
+ u16 seg_len = MBOX_SEG_LEN;
+ u16 left = msg_len;
+ u8 *msg_seg = (u8 *)msg;
+ u64 header = 0;
+
+ down(&func_to_func->msg_send_sem);
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+		 /* for a VF, the source global function id is
+		  * vf_offset_to_pf + vf_id
+		  */
+		 HINIC_MBOX_HEADER_SET(hinic_global_func_id(hwdev),
+				       SRC_GLB_FUNC_IDX);
+
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MBOX_SEG_LEN) {
+ header &= ~MBOX_SEGLEN_MASK;
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len, MBOX_SEND_MSG_INT, msg_info);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n",
+ HINIC_MBOX_HEADER_GET(header, SEQID));
+ goto send_err;
+ }
+
+ left -= HINIC_MBOX_SEG_LEN;
+ msg_seg += HINIC_MBOX_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+send_err:
+ up(&func_to_func->msg_send_sem);
+
+ return err;
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
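+/*
+ * Synchronous request/response path: the sender stamps the message with an
+ * incrementing msg_id, marks the channel EVENT_START and sleeps on the
+ * per-destination mbox_resp completion. resp_mbox_handler() completes it
+ * only while the ids match and the event is still EVENT_START, so a late
+ * response arriving after a timeout is logged and discarded instead of
+ * completing a newer request.
+ */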
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+	/* use mbox_resp to hold the data responded by the other function */
+ struct hinic_recv_mbox *mbox_for_resp;
+ struct mbox_msg_info msg_info = {0};
+ ulong timeo;
+ int err;
+
+ if (!func_to_func->hwdev->chip_present_flag)
+ return -EPERM;
+
+ mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+ down(&func_to_func->mbox_send_sem);
+
+ init_completion(&mbox_for_resp->recv_done);
+
+ msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+ &msg_info);
+ if (err) {
+ sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox failed, msg_id:
%d\n",
+ msg_info.msg_id);
+ set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+ goto send_err;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto send_err;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ sdk_err(func_to_func->hwdev->dev_hdl, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ goto send_err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+			sdk_err(func_to_func->hwdev->dev_hdl,
+				"Invalid response mbox message length: %d for mod %d cmd %d, should be less than: %d\n",
+				mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ err = -EFAULT;
+ goto send_err;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
+
+send_err:
+ destroy_completion(&mbox_for_resp->recv_done);
+ up(&func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+ void *buf_in, u16 in_size)
+{
+ if (!buf_in || !in_size)
+ return -EINVAL;
+
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Mbox msg len(%d) exceed limit(%d)\n",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err;
+
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (!HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, only ppf can send message to other host,
func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dest_host_ppf_id,
+ buf_in, in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev) || HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, hinic_ppf_idx(hwdev),
+ buf_in, in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (!HINIC_IS_VF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+	/* a VF can only send a mailbox message to the PF it is attached to */
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf(hwdev), buf_in, in_size,
+ buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size)
+{
+ struct mbox_msg_info msg_info = {0};
+ int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ down(&hwdev->func_to_func->mbox_send_sem);
+
+ err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size,
+ func_idx, HINIC_HWIF_DIRECT_SEND, MBOX_NO_ACK,
+ &msg_info);
+ if (err)
+ sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n");
+
+ up(&hwdev->func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ return hinic_mbox_to_func_no_ack(hwdev, hinic_pf_id_of_vf(hwdev), mod,
+ cmd, buf_in, in_size);
+}
+
+int __hinic_mbox_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ int err;
+ u16 dst_func_idx;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type:
%d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ if (!vf_id) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "VF id(%d) error!\n", vf_id);
+ return -EINVAL;
+ }
+
+	/* vf_offset_to_pf + vf_id is the VF's global function id within
+	 * this pf
+	 */
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_ppf_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 func_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type:
%d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, func_id, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+EXPORT_SYMBOL(hinic_mbox_ppf_to_vf);
+
+int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 dst_pf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err;
+
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (!HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ if (hinic_ppf_idx(hwdev) == dst_pf_id) {
+ sdk_err(hwdev->dev_hdl,
+ "Params error, dst_pf_id(0x%x) is ppf\n", dst_pf_id);
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_pf_id, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox)
+ return -ENOMEM;
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ err = -ENOMEM;
+ goto alloc_buf_out_err;
+ }
+
+ return 0;
+
+alloc_buf_out_err:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ pr_err("Failed to init mbox info\n");
+ goto init_mbox_info_err;
+ }
+ }
+
+ return 0;
+
+init_mbox_info_err:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ send_mbox->wb_status = send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr,
+ send_mbox->wb_paddr);
+}
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
+{
+ u8 vf_in_pf;
+ int err = 0;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ for (vf_in_pf = 1; vf_in_pf <= hinic_func_max_vf(hwdev); vf_in_pf++) {
+ err = set_vf_mbox_random_id(hwdev,
+ (hinic_glb_pf_vf_offset(hwdev) +
+ vf_in_pf));
+ if (err)
+ break;
+ }
+
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ hwdev->func_to_func->support_vf_random = false;
+ err = 0;
+ sdk_warn(hwdev->dev_hdl, "Mgmt unsupport set vf random id\n");
+ } else if (!err) {
+ hwdev->func_to_func->support_vf_random = true;
+ sdk_info(hwdev->dev_hdl, "PF Set vf random id success\n");
+ }
+
+ return err;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct card_node *chip_node;
+ int err;
+
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func)
+ return -ENOMEM;
+
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ chip_node = hwdev->chip_node;
+ func_to_func->vf_mbx_rand_id = chip_node->vf_mbx_rand_id;
+ func_to_func->vf_mbx_old_rand_id = chip_node->vf_mbx_old_rand_id;
+ sema_init(&func_to_func->mbox_send_sem, 1);
+ sema_init(&func_to_func->msg_send_sem, 1);
+ spin_lock_init(&func_to_func->mbox_lock);
+ func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
+ if (!func_to_func->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n");
+ err = -ENOMEM;
+ goto create_mbox_workq_err;
+ }
+
+ err = alloc_mbox_info(func_to_func->mbox_send);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_active fail\n");
+ goto alloc_mbox_for_send_err;
+ }
+
+ err = alloc_mbox_info(func_to_func->mbox_resp);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_passive fail\n");
+ goto alloc_mbox_for_resp_err;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n");
+ goto alloc_wb_status_err;
+ }
+
+ prepare_send_mbox(func_to_func);
+
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM, vf_to_pf_handler);
+
+ return 0;
+
+alloc_wb_status_err:
+ free_mbox_info(func_to_func->mbox_resp);
+
+alloc_mbox_for_resp_err:
+ free_mbox_info(func_to_func->mbox_send);
+
+alloc_mbox_for_send_err:
+ destroy_workqueue(func_to_func->workq);
+
+create_mbox_workq_err:
+ kfree(func_to_func);
+
+ return err;
+}
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+
+ free_mbox_wb_status(func_to_func);
+
+ free_mbox_info(func_to_func->mbox_resp);
+
+ free_mbox_info(func_to_func->mbox_send);
+
+ destroy_workqueue(func_to_func->workq);
+
+ spin_lock_deinit(&func_to_func->mbox_lock);
+ sema_deinit(&func_to_func->mbox_send_sem);
+ sema_deinit(&func_to_func->msg_send_sem);
+
+ kfree(func_to_func);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
new file mode 100644
index 000000000000..f5e5c31cfd90
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+enum hinic_mbox_seg_errcode {
+ MBOX_ERRCODE_NO_ERRORS = 0,
+ /* VF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100,
+ /* PPF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200,
+ /* PF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300,
+ /* The mailbox data size is set to all zero */
+ MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400,
+ /* The sender function attribute has not been learned by CPI hardware */
+ MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500,
+ /* The receiver function attr has not been learned by CPI hardware */
+ MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600,
+};
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+	u8 status; /* only 6 bits can be used */
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+	u8 seq_id;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status; /* write back status */
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef int (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_ppf_mbox_cb)(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+typedef int (*hinic_pf_recv_from_ppf_mbox_cb)(void *handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
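+
+/*
+ * Callback roles: vf_mbox_cb runs on a VF for messages from its PF,
+ * pf_mbox_cb runs on a PF for messages from its VFs, ppf_mbox_cb runs on
+ * the PPF for messages from other PFs/VFs, and
+ * hinic_pf_recv_from_ppf_mbox_cb runs on a PF for messages sent by the
+ * PPF. Each callback receives the request in buf_in and may fill buf_out,
+ * which is sent back automatically when the message was sent with
+ * MBOX_ACK.
+ */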
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ hinic_ppf_mbox_cb ppf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_recv_from_ppf_mbox_cb pf_recv_from_ppf_mbox_cb[HINIC_MOD_MAX];
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+	/* lock for mbox event flag */
+ spinlock_t mbox_lock;
+
+ u32 *vf_mbx_old_rand_id;
+ u32 *vf_mbx_rand_id;
+ bool support_vf_random;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_check_handle {
+ u8 cmd;
+ bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ void *buf_in, u16 in_size);
+};
+
+int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_ppf_mbox_cb callback);
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_recv_from_ppf_mbox_cb callback);
+
+void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size);
+
+void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size);
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev);
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size);
+
+bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size);
+
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ u8 size);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size);
+
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size);
+
+int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u16 dst_pf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int __hinic_mbox_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
new file mode 100644
index 000000000000..4a7fd98d1233
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+
+#include "hinic_hwif.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_eqs.h"
+
+#define BUF_OUT_DEFAULT_SIZE 1
+#define SEGMENT_LEN 48
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+
+#define MSG_NO_RESP 0xFFFF
+
+#define MAX_MSG_SZ 2016
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
+
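+/*
+ * Sync and async messages to the management CPU each use a 9-bit id space;
+ * bit 9 (ASYNC_MSG_FLAG) is set on every async id so a response can be
+ * classified from its msg_id alone: mgmt_resp_msg_handler() silently drops
+ * responses whose id carries ASYNC_MSG_FLAG and only matches sync
+ * responses against sync_msg_id.
+ */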
+static void pf_to_mgmt_send_event_set(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ int event_flag)
+{
+ down(&pf_to_mgmt->msg_sem);
+ pf_to_mgmt->event_flag = event_flag;
+ up(&pf_to_mgmt->msg_sem);
+}
+
+/**
+ * hinic_register_mgmt_msg_cb - register sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip that this handler will handle its sync messages
+ * @pri_handle: private data that will be passed to the callback
+ * @callback: the handler for a sync message that will handle messages
+ **/
+int hinic_register_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod,
+ void *pri_handle, hinic_mgmt_msg_cb callback)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (mod >= HINIC_MOD_HW_MAX || !hwdev)
+ return -EFAULT;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return -EINVAL;
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_register_mgmt_msg_cb);
+
+/**
+ * hinic_unregister_mgmt_msg_cb - unregister sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip that this handler will handle its sync messages
+ **/
+void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (!hwdev)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ if (mod < HINIC_MOD_HW_MAX) {
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL;
+ }
+}
+EXPORT_SYMBOL(hinic_unregister_mgmt_msg_cb);
+
+void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev || !proc)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Register recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ pf_to_mgmt->proc.info[cmd_idx].cmd = cmd;
+ pf_to_mgmt->proc.info[cmd_idx].proc = proc;
+
+ pf_to_mgmt->proc.cmd_num++;
+}
+
+void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Unregister recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) {
+ if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+ pf_to_mgmt->proc.info[cmd_idx].cmd = 0;
+ pf_to_mgmt->proc.info[cmd_idx].proc = NULL;
+ pf_to_mgmt->proc.cmd_num--;
+ }
+ }
+}
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
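+ *
+ * The total is MGMT_MSG_RSVD_FOR_DEV (8) bytes reserved for the device, an
+ * 8-byte header, plus the data, rounded up in MGMT_MSG_SIZE_STEP (16) byte
+ * steps above the MGMT_MSG_SIZE_MIN (20) byte minimum. For example,
+ * msg_data_len = 10 gives 8 + 8 + 10 = 26 bytes, rounded to
+ * 20 + ALIGN(26 - 20, 16) = 36.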
+ **/
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size;
+
+ msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len);
+
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: the ack type of the message
+ * @direction: the direction of the original message
+ * @cmd: command of the message
+ * @msg_id: message id
+ **/
+static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, int msg_len, enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ enum hinic_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
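+ *
+ * The resulting buffer layout is:
+ *	[MGMT_MSG_RSVD_FOR_DEV zeroed bytes][8-byte header][msg_len bytes of data]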
+ **/
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
+ int msg_len)
+{
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+ int err;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the async_msg_buf */
+ spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+ ASYNC_MSG_ID_INC(pf_to_mgmt);
+
+ err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_DIRECT_SEND, MSG_NO_RESP);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+
+ if (err) {
+ sdk_err(dev, "Failed to send async mgmt msg\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * send_msg_to_mgmt_sync - send sync message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt));
+
+ if (ack_type == HINIC_MSG_ACK)
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START);
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
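+/*
+ * hinic_pf_to_mgmt_sync serializes callers with sync_msg_lock, sends the
+ * request over the API command chain and sleeps on recv_done, which
+ * mgmt_resp_msg_handler() completes when the response carrying the
+ * matching sync_msg_id arrives; event_flag tracks whether a late response
+ * should still be delivered or ignored.
+ */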
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+ struct hinic_recv_msg *recv_msg;
+ struct completion *recv_done;
+ ulong timeo;
+ int err;
+ ulong ret;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+ recv_done = &recv_msg->recv_done;
+
+ init_completion(recv_done);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %d\n",
+ pf_to_mgmt->sync_msg_id);
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL);
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ ret = wait_for_completion_timeout(recv_done, timeo);
+ down(&pf_to_mgmt->msg_sem);
+ if (!ret) {
+ sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n",
+ pf_to_mgmt->sync_msg_id);
+ hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
+ err = -ETIMEDOUT;
+ pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT;
+ up(&pf_to_mgmt->msg_sem);
+ goto unlock_sync_msg;
+ }
+ pf_to_mgmt->event_flag = SEND_EVENT_END;
+ up(&pf_to_mgmt->msg_sem);
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) {
+ destroy_completion(recv_done);
+ up(&pf_to_mgmt->sync_msg_lock);
+ return -ETIMEDOUT;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < recv_msg->msg_len) {
+ sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt,
should less than: %d\n",
+ recv_msg->msg_len, mod, cmd, *out_size);
+ err = -EFAULT;
+ goto unlock_sync_msg;
+ }
+
+ if (recv_msg->msg_len)
+ memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
+
+ *out_size = recv_msg->msg_len;
+ }
+
+unlock_sync_msg:
+ destroy_completion(recv_done);
+ up(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ return 0;
+}
+
+/* This function is only used by txrx flush */
+int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+ int err = -EINVAL;
+
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+ sdk_err(dev, "Mgmt module not initialized\n");
+ return -EINVAL;
+ }
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n",
+ in_size);
+ return -EINVAL;
+ }
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+
+ up(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+/**
+ * API cmd write/read bypass uses polling by default; to use the AEQ
+ * interrupt instead, set wb_trigger_aeqe to 1.
+ **/
+int hinic_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_api_cmd_chain *chain;
+
+ if (!hwdev || !size || !cmd)
+ return -EINVAL;
+
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+ hinic_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_WRITE];
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ return hinic_api_cmd_write(chain, dest, cmd, size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_write_nack);
+
+int hinic_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack,
+ u16 ack_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_api_cmd_chain *chain;
+
+ if (!hwdev || !cmd || (ack_size && !ack))
+ return -EINVAL;
+
+ if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+ hinic_get_mgmt_channel_status(hwdev))
+ return -EPERM;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_READ];
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -EPERM;
+
+ return hinic_api_cmd_read(chain, dest, cmd, size, ack, ack_size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_read_ack);
+
+static void __send_mgmt_ack(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, u16 msg_id)
+{
+ u16 buf_size;
+
+ if (!in_size)
+ buf_size = BUF_OUT_DEFAULT_SIZE;
+ else
+ buf_size = in_size;
+
+ spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+ /* MGMT sent sync msg, send the response */
+ send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd,
+ buf_in, buf_size, HINIC_MSG_RESPONSE,
+ msg_id);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, u16 msg_id, int need_resp)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+ void *buf_out = pf_to_mgmt->mgmt_ack_buf;
+ enum hinic_mod_type tmp_mod = mod;
+ bool ack_first = false;
+ u16 out_size = 0;
+
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mod >= HINIC_MOD_HW_MAX) {
+ sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n",
+ mod);
+ goto resp;
+ }
+
+ if (!pf_to_mgmt->recv_mgmt_msg_cb[mod]) {
+ sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
+ mod);
+ goto resp;
+ }
+
+ ack_first = hinic_mgmt_event_ack_first(mod, cmd);
+ if (ack_first && need_resp) {
+ /* send ack to mgmt first to avoid command timeout in
+ * mgmt(100ms in mgmt);
+ * mgmt to host command don't need any response data from host,
+ * just need ack from host
+ */
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id);
+ }
+
+ pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev,
+ pf_to_mgmt->recv_mgmt_msg_data[tmp_mod],
+ cmd, buf_in, in_size,
+ buf_out, &out_size);
+
+resp:
+ if (!ack_first && need_resp)
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
+ msg_id);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+ /* delete async msg */
+ if (recv_msg->msg_id & ASYNC_MSG_FLAG)
+ return;
+
+ down(&pf_to_mgmt->msg_sem);
+ if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id &&
+ pf_to_mgmt->event_flag == SEND_EVENT_START) {
+ complete(&recv_msg->recv_done);
+ } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+ sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event
state=%d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else {
+ sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event
state=%d!\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ up(&pf_to_mgmt->msg_sem);
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hinic_mgmt_msg_handle_work, work);
+
+ mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod,
+ mgmt_work->cmd, mgmt_work->msg,
+ mgmt_work->msg_len, mgmt_work->msg_id,
+ !mgmt_work->async_mgmt_to_pf);
+
+ destroy_work(&mgmt_work->work);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ **/
+static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work;
+ u64 mbox_header = *((u64 *)header);
+ void *msg_body = header + sizeof(mbox_header);
+ u32 seq_id, seq_len;
+ u64 dir;
+
+ /* Don't need to get anything from hw when cmd is async */
+ dir = HINIC_MSG_HEADER_GET(mbox_header, DIRECTION);
+ if (dir == HINIC_MSG_RESPONSE &&
+ HINIC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)
+ return;
+
+ seq_len = HINIC_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ seq_id = HINIC_MSG_HEADER_GET(mbox_header, SEQID);
+ seq_id = seq_id * SEGMENT_LEN;
+
+ memcpy((u8 *)recv_msg->msg + seq_id, msg_body, seq_len);
+
+ if (!HINIC_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(mbox_header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(mbox_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(mbox_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(mbox_header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(mbox_header, MSG_ID);
+
+ if (HINIC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_MSG_RESPONSE) {
+ mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ return;
+ }
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work) {
+		sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt work memory failed\n");
+ return;
+ }
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+			sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n");
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
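+/* Reassembly sketch (illustrative): a message longer than one segment
+ * arrives as several AEQ elements; segment N is copied to
+ * recv_msg->msg + N * SEGMENT_LEN, and only the element with the LAST
+ * bit set completes the message and dispatches it, e.g. for a
+ * two-segment message:
+ *
+ *	seg 0: SEQID = 0, LAST = 0 -> bytes [0, SEGMENT_LEN)
+ *	seg 1: SEQID = 1, LAST = 1 -> bytes [SEGMENT_LEN, msg_len)
+ */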
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to hw device
+ * @header: the header of the message
+ * @size: unused
+ **/
+void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+ bool is_send_dir = false;
+
+ pf_to_mgmt = dev->pf_to_mgmt;
+
+ is_send_dir = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HINIC_MSG_DIRECT_SEND) ? true : false;
+
+	/* ignore mgmt-initiated report events during function deinit */
+ if (test_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state) && is_send_dir)
+ return;
+
+ recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ **/
+static void free_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ int err;
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate resp recv msg\n");
+ goto alloc_msg_for_resp_err;
+ }
+
+ pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->async_msg_buf) {
+ err = -ENOMEM;
+ goto async_msg_buf_err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf) {
+ err = -ENOMEM;
+ goto sync_msg_buf_err;
+ }
+
+ pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf) {
+ err = -ENOMEM;
+ goto ack_msg_buf_err;
+ }
+
+ return 0;
+
+ack_msg_buf_err:
+ kfree(pf_to_mgmt->sync_msg_buf);
+
+sync_msg_buf_err:
+ kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ **/
+static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->mgmt_ack_buf);
+ kfree(pf_to_mgmt->sync_msg_buf);
+ kfree(pf_to_mgmt->async_msg_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = hwdev->dev_hdl;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt)
+ return -ENOMEM;
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+ spin_lock_init(&pf_to_mgmt->async_msg_lock);
+ sema_init(&pf_to_mgmt->msg_sem, 1);
+ sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+ pf_to_mgmt->workq = create_singlethread_workqueue(HINIC_MGMT_WQ_NAME);
+ if (!pf_to_mgmt->workq) {
+ sdk_err(dev, "Failed to initialize MGMT workqueue\n");
+ err = -ENOMEM;
+ goto create_mgmt_workq_err;
+ }
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ sdk_err(dev, "Failed to allocate msg buffers\n");
+ goto alloc_msg_buf_err;
+ }
+
+ err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+ if (err) {
+ sdk_err(dev, "Failed to init the api cmd chains\n");
+ goto api_cmd_init_err;
+ }
+
+ return 0;
+
+api_cmd_init_err:
+ free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+ destroy_workqueue(pf_to_mgmt->workq);
+
+create_mgmt_workq_err:
+ sema_deinit(&pf_to_mgmt->msg_sem);
+ spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
+ sema_deinit(&pf_to_mgmt->sync_msg_lock);
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ **/
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+ hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ free_msg_buf(pf_to_mgmt);
+ destroy_workqueue(pf_to_mgmt->workq);
+ sema_deinit(&pf_to_mgmt->msg_sem);
+ spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
+ sema_deinit(&pf_to_mgmt->sync_msg_lock);
+ kfree(pf_to_mgmt);
+}
+
+void hinic_flush_mgmt_workq(void *hwdev)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+
+ flush_workqueue(dev->aeqs->workq);
+
+ if (hinic_func_type(dev) != TYPE_VF &&
+ hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED))
+ flush_workqueue(dev->pf_to_mgmt->workq);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
new file mode 100644
index 000000000000..2a9f7ad7cac9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MGMT_H_
+#define HINIC_MGMT_H_
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
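+/* Illustrative sketch (not part of the driver): composing and decoding a
+ * 64-bit message header with the accessors above; the field values here
+ * are hypothetical.
+ *
+ *	u64 header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ *		     HINIC_MSG_HEADER_SET(mod, MODULE) |
+ *		     HINIC_MSG_HEADER_SET(seg_len, SEG_LEN) |
+ *		     HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ *		     HINIC_MSG_HEADER_SET(HINIC_MSG_DIRECT_SEND, DIRECTION) |
+ *		     HINIC_MSG_HEADER_SET(cmd, CMD) |
+ *		     HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+ *
+ *	u8 recv_cmd = HINIC_MSG_HEADER_GET(header, CMD);
+ */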
+
+#define HINIC_MGMT_WQ_NAME "hinic_mgmt"
+
+enum hinic_msg_direction_type {
+ HINIC_MSG_DIRECT_SEND = 0,
+ HINIC_MSG_RESPONSE = 1
+};
+
+enum hinic_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hinic_mgmt_msg_type {
+ ASYNC_MGMT_MSG = 0,
+ SYNC_MGMT_MSG = 1,
+};
+
+enum hinic_msg_ack_type {
+ HINIC_MSG_ACK = 0,
+ HINIC_MSG_NO_ACK = 1,
+};
+
+struct hinic_recv_msg {
+ void *msg;
+
+ struct completion recv_done;
+
+ u16 msg_len;
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 8
+
+struct comm_up_self_msg_sub_info {
+ u8 cmd;
+ comm_up_self_msg_proc proc;
+};
+
+struct comm_up_self_msg_info {
+ u8 cmd_num;
+ struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_UNINIT = 0,
+ SEND_EVENT_START,
+ SEND_EVENT_FAIL,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+struct hinic_msg_pf_to_mgmt {
+ struct hinic_hwdev *hwdev;
+
+	/* Async commands cannot be scheduled */
+ spinlock_t async_msg_lock;
+ struct semaphore sync_msg_lock;
+
+ struct workqueue_struct *workq;
+
+ void *async_msg_buf;
+ void *sync_msg_buf;
+ void *mgmt_ack_buf;
+
+ struct hinic_recv_msg recv_msg_from_mgmt;
+ struct hinic_recv_msg recv_resp_msg_from_mgmt;
+
+ u16 async_msg_id;
+ u16 sync_msg_id;
+
+ struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+ hinic_mgmt_msg_cb recv_mgmt_msg_cb[HINIC_MOD_HW_MAX];
+ void *recv_mgmt_msg_data[HINIC_MOD_HW_MAX];
+
+ void (*async_msg_cb[HINIC_MOD_HW_MAX])(void *handle,
+ enum hinic_mgmt_cmd cmd,
+ void *priv_data, u32 msg_id,
+ void *buf_out, u32 out_size);
+
+ void *async_msg_cb_data[HINIC_MOD_HW_MAX];
+
+ struct comm_up_self_msg_info proc;
+
+ /* lock when sending msg */
+ struct semaphore msg_sem;
+ enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+struct hinic_mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ void *msg;
+ u16 msg_len;
+
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+
+ int async_mgmt_to_pf;
+};
+
+int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+
+void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size);
+
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
new file mode 100644
index 000000000000..f4e460624f43
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
@@ -0,0 +1,919 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MGMT_INTERFACE_H
+#define HINIC_MGMT_INTERFACE_H
+
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+#include "hinic_port_cmd.h"
+
+/* up to driver event */
+#define HINIC_PORT_CMD_MGMT_RESET 0x0
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_port_mac_get {
+ u16 func_id;
+ u8 mac[ETH_ALEN];
+ int ret;
+};
+
+struct hinic_function_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rx_wqe_buf_size;
+ u32 mtu;
+};
+
+struct hinic_cmd_qpn {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 base_qpn;
+};
+
+struct hinic_port_mac_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vport_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_mtu {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+	u16 supported; /* 0xFFFF represents an invalid value */
+ u16 advertised;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+	u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_set_link_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable;
+};
+
+struct hinic_get_link {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link_status;
+ u8 rsvd1;
+};
+
+struct hinic_link_status_report {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link_status;
+ u8 port_id;
+};
+
+struct hinic_port_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 resv2[3];
+};
+
+struct hinic_tso_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_lro_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_lro_timer {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 type; /* 0: set timer value, 1: get timer value */
+	u8 enable; /* when setting the LRO timer, enable should be 1 */
+ u16 rsvd1;
+ u32 timer;
+};
+
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_vlan_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_vlan_filter {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rsvd1[2];
+ u32 vlan_filter_ctrl;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_rx_mode_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[HINIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[3];
+ u8 prio_tc[HINIC_DCB_UP_MAX];
+};
+
+struct hinic_rss_template_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_indir_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_template_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_context_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_up_ets_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 rsvd1[3];
+ u8 up_tc[HINIC_DCB_UP_MAX];
+ u8 pg_bw[HINIC_DCB_PG_MAX];
+ u8 pgid[HINIC_DCB_UP_MAX];
+ u8 up_bw[HINIC_DCB_UP_MAX];
+ u8 prio[HINIC_DCB_PG_MAX];
+};
+
+struct hinic_set_pfc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 pfc_en;
+ u8 pfc_bitmap;
+ u8 rsvd1[4];
+};
+
+struct hinic_set_micro_pfc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 micro_pfc_en;
+ u8 rsvd1;
+ u8 cfg_rq_max;
+ u8 cfg_rq_depth;
+ u16 rq_sm_thd;
+};
+
+struct hinic_cos_up_map {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+	/* each bit indicates whether the map entry at that index is valid */
+ u8 cos_valid_mask;
+ u16 rsvd1;
+
+ /* user priority in cos(index:cos, value: up) */
+ u8 map[HINIC_DCB_COS_MAX];
+};
+
+struct hinic_set_rq_iq_mapping {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 map[HINIC_MAX_NUM_RQ];
+ u32 num_rqs;
+ u32 rq_depth;
+};
+
+#define HINIC_PFC_SET_FUNC_THD 0
+#define HINIC_PFC_SET_GLB_THD 1
+struct hinic_pfc_thd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 op_type;
+ u16 func_thd;
+ u16 glb_thd;
+};
+
+/* set iq enable to ucode */
+struct hinic_cmd_enable_iq {
+ u8 rq_depth;
+ u8 num_rq;
+ u16 glb_rq_id;
+
+ u16 q_id;
+ u16 lower_thd;
+
+ u16 force_en; /* 1: force unlock, 0: depend on condition */
+ u16 prod_idx;
+};
+
+/* set iq enable to mgmt cpu */
+struct hinic_cmd_enable_iq_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rq_depth;
+ u8 num_rq;
+ u16 glb_rq_id;
+
+ u16 q_id;
+ u16 lower_thd;
+
+ u16 force_en; /* 1: force unlock, 0: depend on condition */
+ u16 prod_idx;
+};
+
+struct hinic_port_link_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+struct hinic_cable_plug_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+struct hinic_link_err_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+struct hinic_sync_time_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u64 mstime;
+};
+
+#define HINIC_PORT_STATS_VERSION 0
+
+struct hinic_port_stats_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+struct hinic_port_ipsu_mac {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 index;
+ u16 func_id;
+ u16 vlan_id;
+ u8 mac[ETH_ALEN];
+};
+
+/* get or set loopback mode, need to modify by base API */
+#define HINIC_INTERNAL_LP_MODE 5
+#define LOOP_MODE_MIN 1
+#define LOOP_MODE_MAX 6
+
+struct hinic_port_loopback {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u32 mode;
+ u32 en;
+};
+
+#define HINIC_COMPILE_TIME_LEN 20
+struct hinic_version_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+#define ANTI_ATTACK_DEFAULT_CIR 500000
+#define ANTI_ATTACK_DEFAULT_XIR 600000
+#define ANTI_ATTACK_DEFAULT_CBS 10000000
+#define ANTI_ATTACK_DEFAULT_XBS 12000000
+/* set physical port Anti-Attack rate */
+struct hinic_port_anti_attack_rate {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */
+ u32 cir; /* Committed Information Rate */
+ u32 xir; /* eXtended Information Rate */
+ u32 cbs; /* Committed Burst Size */
+ u32 xbs; /* eXtended Burst Size */
+};
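+/* Usage sketch (hypothetical caller, not part of this file): enabling
+ * rate-limiting with the default profile defined above.
+ *
+ *	struct hinic_port_anti_attack_rate rate = {
+ *		.func_id = func_id,
+ *		.enable = 1,
+ *		.cir = ANTI_ATTACK_DEFAULT_CIR,
+ *		.xir = ANTI_ATTACK_DEFAULT_XIR,
+ *		.cbs = ANTI_ATTACK_DEFAULT_CBS,
+ *		.xbs = ANTI_ATTACK_DEFAULT_XBS,
+ *	};
+ */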
+
+struct hinic_clear_sq_resource {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
+struct hinic_super_cqe {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 super_cqe_en;
+};
+
+struct hinic_capture_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u32 op_type;
+ u32 func_id;
+ u32 is_en_trx;
+ u32 offset_cos;
+ u32 data_vlan;
+};
+
+struct hinic_vf_dcb_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_dcb_state state;
+};
+
+struct hinic_port_funcs_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id; /* pf_id */
+ u8 drop_en;
+ u8 rsvd1;
+};
+
+struct hinic_reset_link_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_force_pkt_drop {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port;
+ u8 rsvd1[3];
+};
+
+struct hinic_set_link_follow {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 follow_status;
+ u8 rsvd2[3];
+};
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz);
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id);
+
+int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state);
+
+void hinic_clear_vf_infos(void *hwdev, u16 vf_id);
+
+/* OVS module interface, for BMGW cpath command */
+enum hinic_hiovs_cmd {
+ OVS_SET_CPATH_VLAN = 39,
+ OVS_GET_CPATH_VLAN = 40,
+ OVS_DEL_CPATH_VLAN = 43,
+};
+
+struct cmd_cpath_vlan {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 vlan_id;
+ u16 pf_id;
+};
+
+/* HILINK module interface */
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+enum hinic_link_port_type {
+ LINK_PORT_FIBRE = 1,
+ LINK_PORT_ELECTRIC,
+ LINK_PORT_COPPER,
+ LINK_PORT_AOC,
+ LINK_PORT_BACKPLANE,
+ LINK_PORT_BASET,
+ LINK_PORT_MAX_TYPE,
+};
+
+enum hilink_fibre_subtype {
+ FIBRE_SUBTYPE_SR = 1,
+ FIBRE_SUBTYPE_LR,
+ FIBRE_SUBTYPE_MAX,
+};
+
+enum hilink_fec_type {
+ HILINK_FEC_RSFEC,
+ HILINK_FEC_BASEFEC,
+ HILINK_FEC_NOFEC,
+ HILINK_FEC_MAX_TYPE,
+};
+
+struct hi30_ffe_data {
+ u8 PRE2;
+ u8 PRE1;
+ u8 POST1;
+ u8 POST2;
+ u8 MAIN;
+};
+
+struct hi30_ctle_data {
+ u8 ctlebst[3];
+ u8 ctlecmband[3];
+ u8 ctlermband[3];
+ u8 ctleza[3];
+ u8 ctlesqh[3];
+ u8 ctleactgn[3];
+ u8 ctlepassgn;
+};
+
+struct hi30_dfe_data {
+ u8 fix_tap1_cen;
+ u8 fix_tap1_edg;
+ u8 dfefxtap[6];
+ u8 dfefloattap[6];
+};
+
+struct hilink_sfp_power {
+ u32 rx_power;
+ u32 tx_power;
+ u32 rsvd;
+ u32 is_invalid;
+};
+
+#define HILINK_MAX_LANE 4
+
+struct hilink_lane {
+ u8 lane_used;
+ u8 hi30_ffe[5];
+ u8 hi30_ctle[19];
+ u8 hi30_dfe[14];
+ u8 rsvd4;
+};
+
+struct hinic_link_info {
+ u8 vendor_name[16];
+ /* port type:
+ * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
+ * 6 - baseT; 0xffff - unknown
+ *
+ * port subtype:
+ * Only when port_type is fiber:
+ * 1 - SR; 2 - LR
+ */
+ u32 port_type;
+ u32 port_sub_type;
+ u32 cable_length;
+ u8 cable_temp;
+	u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */
+ u8 sfp_type; /* 0 - qsfp; 1 - sfp */
+ u8 rsvd0;
+ u32 power[4]; /* uW; if is sfp, only power[2] is valid */
+
+ u8 an_state; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u16 speed; /* 1(G)/10(G)/25(G)... */
+
+	u8 cable_absent; /* 0 - cable present; 1 - cable absent */
+ u8 alos; /* 0 - yes; 1 - no */
+ u8 rx_los; /* 0 - yes; 1 - no */
+ u8 pma_status;
+ u32 pma_dbg_info_reg; /* pma debug info: */
+ u32 pma_signal_ok_reg; /* signal ok: */
+
+ u32 pcs_err_blk_cnt_reg; /* error block counter: */
+ u32 rf_lf_status_reg; /* RF/LF status: */
+ u8 pcs_link_reg; /* pcs link: */
+ u8 mac_link_reg; /* mac link: */
+ u8 mac_tx_en;
+ u8 mac_rx_en;
+ u32 pcs_err_cnt;
+
+ /* struct hinic_hilink_lane: 40 bytes */
+ u8 lane1[40]; /* 25GE lane in old firmware */
+
+ u8 rsvd1[266]; /* hilink machine state */
+
+ u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */
+
+ u8 rsvd2[2];
+};
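+/* Decoding sketch (illustrative): interpreting the raw port_type field
+ * against enum hinic_link_port_type documented above; port_sub_type is
+ * only meaningful for fibre ports.
+ *
+ *	if (info->port_type == LINK_PORT_FIBRE &&
+ *	    info->port_sub_type == FIBRE_SUBTYPE_SR)
+ *		; // an SR optical module, for example
+ */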
+
+struct hinic_hilink_link_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 port_id;
+ u8 info_type; /* 1: link up 2: link down 3 cable plugged */
+ u8 rsvd1;
+
+ struct hinic_link_info info;
+
+ u8 rsvd2[352];
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
+enum hinic_tx_promsic {
+ HINIC_TX_PROMISC_ENABLE = 0,
+ HINIC_TX_PROMISC_DISABLE = 1,
+};
+
+struct hinic_promsic_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u16 func_id;
+ u8 cfg;
+ u8 rsvd1;
+};
+
+int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
new file mode 100644
index 000000000000..df92040bec98
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_csr.h"
+#include "hinic_msix_attr.h"
+
+#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+
+/**
+ * hinic_msix_attr_set - set message attribute of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer: coalesc period for interrupt (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer: maximum wait for resending msix message
+ *		  (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
+ HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_timer_cfg, LLI_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
+ HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+
+ return 0;
+}
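+/* Usage sketch (hypothetical values): program entry 0 with a pending
+ * limit of 2 and the maximum coalescing timer; each argument is packed
+ * into its bit-field of the MSI-X control CSR by HINIC_MSIX_ATTR_SET.
+ *
+ *	err = hinic_msix_attr_set(hwif, 0, 2, 0xFF, 0, 0, 3);
+ */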
+
+/**
+ * hinic_msix_attr_get - get message attribute of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer_cfg: coalesc period for interrupt (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer_cfg: maximum wait for resending msix message
+ *		      (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
+ u8 *pending_limit, u8 *coalesc_timer_cfg,
+ u8 *lli_timer_cfg, u8 *lli_credit_limit,
+ u8 *resend_timer_cfg)
+{
+ u32 addr, val;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
+ return -EINVAL;
+
+ addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
+ *coalesc_timer_cfg = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
+ *lli_timer_cfg = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
+ *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
+ *resend_timer_cfg = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
+
+ return 0;
+}
+
+/**
+ * hinic_msix_attr_cnt_set - set message attribute counters of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @lli_timer_cnt: replenishing period for low latency interrupt (unit 8 us)
+ * @lli_credit_cnt: maximum credits for low latency msix messages (unit 8)
+ * @coalesc_timer_cnt: coalesc period for interrupt (unit 8 us)
+ * @pending_cnt: the maximum pending interrupt events (unit 8)
+ * @resend_timer_cnt: maximum wait for resending msix message
+ *		      (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 lli_timer_cnt, u8 lli_credit_cnt,
+ u8 coalesc_timer_cnt, u8 pending_cnt,
+ u8 resend_timer_cnt)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_CNT_SET(lli_timer_cnt, LLI_TIMER) |
+ HINIC_MSIX_CNT_SET(lli_credit_cnt, LLI_CREDIT) |
+ HINIC_MSIX_CNT_SET(coalesc_timer_cnt, COALESC_TIMER) |
+ HINIC_MSIX_CNT_SET(pending_cnt, PENDING) |
+ HINIC_MSIX_CNT_SET(resend_timer_cnt, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
new file mode 100644
index 000000000000..288b39691ce6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MSIX_ATTR_H_
+#define HINIC_MSIX_ATTR_H_
+
+#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
+#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
+#define HINIC_MSIX_LLI_TIMER_SHIFT 16
+#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
+#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFFU
+#define HINIC_MSIX_COALESC_TIMER_MASK 0xFFU
+#define HINIC_MSIX_LLI_TIMER_MASK 0xFFU
+#define HINIC_MSIX_LLI_CREDIT_MASK 0x1FU
+#define HINIC_MSIX_RESEND_TIMER_MASK 0x7U
+
+#define HINIC_MSIX_ATTR_GET(val, member) \
+ (((val) >> HINIC_MSIX_##member##_SHIFT) \
+ & HINIC_MSIX_##member##_MASK)
+
+#define HINIC_MSIX_ATTR_SET(val, member) \
+ (((val) & HINIC_MSIX_##member##_MASK) \
+ << HINIC_MSIX_##member##_SHIFT)
+
+#define HINIC_MSIX_CNT_LLI_TIMER_SHIFT 0
+#define HINIC_MSIX_CNT_LLI_CREDIT_SHIFT 8
+#define HINIC_MSIX_CNT_COALESC_TIMER_SHIFT 8
+#define HINIC_MSIX_CNT_PENDING_SHIFT 8
+#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_CNT_LLI_TIMER_MASK 0xFFU
+#define HINIC_MSIX_CNT_LLI_CREDIT_MASK 0xFFU
+#define HINIC_MSIX_CNT_COALESC_TIMER_MASK 0xFFU
+#define HINIC_MSIX_CNT_PENDING_MASK 0x1FU
+#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U
+
+#define HINIC_MSIX_CNT_SET(val, member) \
+ (((val) & HINIC_MSIX_CNT_##member##_MASK) << \
+ HINIC_MSIX_CNT_##member##_SHIFT)
+
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer);
+
+int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
+ u8 *pending_limit, u8 *coalesc_timer_cfg,
+ u8 *lli_timer_cfg, u8 *lli_credit_limit,
+ u8 *resend_timer_cfg);
+
+int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index,
+			    u8 lli_timer_cnt, u8 lli_credit_cnt,
+			    u8 coalesc_timer_cnt, u8 pending_cnt,
+			    u8 resend_timer_cnt);
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
new file mode 100644
index 000000000000..8b1b67f3fc14
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_nic_io.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_hwif.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+
+#define SLAVE_HOST_STATUS_CLEAR(host_id, val) \
+ ((val) & (~(1U << (host_id))))
+#define SLAVE_HOST_STATUS_SET(host_id, enable) \
+ (((u8)(enable) & 1U) << (host_id))
+#define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id))))
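+/* Worked example (illustrative): with reg_val = 0x5 (hosts 0 and 2
+ * enabled), SLAVE_HOST_STATUS_GET(1, reg_val) == 0, and disabling
+ * host 2 via CLEAR then SET yields
+ * SLAVE_HOST_STATUS_CLEAR(2, 0x5) | SLAVE_HOST_STATUS_SET(2, false) == 0x1,
+ * which is how set_slave_host_enable() below updates one host's bit
+ * without disturbing the others.
+ */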
+
+void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable)
+{
+ u32 reg_val;
+
+ if (HINIC_FUNC_TYPE(hwdev) != TYPE_PPF)
+ return;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_MULT_HOST_SLAVE_STATUS_ADDR);
+
+ reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val);
+ reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_MULT_HOST_SLAVE_STATUS_ADDR,
+ reg_val);
+
+ sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n",
+ host_id, enable, reg_val);
+}
+
+bool get_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id)
+{
+ u32 reg_val;
+
+ if (HINIC_FUNC_TYPE(hwdev) != TYPE_PPF)
+ return false;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_MULT_HOST_SLAVE_STATUS_ADDR);
+
+ return SLAVE_HOST_STATUS_GET(host_id, reg_val);
+}
+
+void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable)
+{
+ u32 reg_val;
+
+ if (!IS_MASTER_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) != TYPE_PPF)
+ return;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+ reg_val = MULTI_HOST_REG_CLEAR(reg_val, MASTER_MBX_STS);
+ reg_val |= MULTI_HOST_REG_SET((u8)enable, MASTER_MBX_STS);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR, reg_val);
+
+ sdk_info(hwdev->dev_hdl, "multi-host status %d, reg value: 0x%x\n",
+ enable, reg_val);
+}
+
+bool get_master_host_mbox_enable(struct hinic_hwdev *hwdev)
+{
+ u32 reg_val;
+
+ if (!IS_SLAVE_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) == TYPE_VF)
+ return true;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+
+ return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS);
+}
+
+void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode)
+{
+ switch (mode) {
+ case FUNC_MOD_MULTI_BM_MASTER:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host BM master host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER;
+ hwdev->feature_cap = HINIC_MULTI_BM_MASTER;
+ break;
+ case FUNC_MOD_MULTI_BM_SLAVE:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host BM slave host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE;
+ hwdev->feature_cap = HINIC_MULTI_BM_SLAVE;
+ break;
+ case FUNC_MOD_MULTI_VM_MASTER:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host VM master host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER;
+ hwdev->feature_cap = HINIC_MULTI_VM_MASTER;
+ break;
+ case FUNC_MOD_MULTI_VM_SLAVE:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host VM slave host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE;
+ hwdev->feature_cap = HINIC_MULTI_VM_SLAVE;
+ break;
+ default:
+ hwdev->func_mode = FUNC_MOD_NORMAL_HOST;
+ hwdev->feature_cap = HINIC_NORMAL_HOST_CAP;
+ break;
+ }
+}
+
+int rectify_host_mode(struct hinic_hwdev *hwdev)
+{
+ u16 cur_sdi_mode;
+ int err;
+
+ if (hwdev->board_info.board_type !=
+ HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE)
+ return 0;
+
+ sdk_info(hwdev->dev_hdl, "Rectify host mode, host_id: %d\n",
+ hinic_pcie_itf_id(hwdev));
+
+ err = hinic_get_sdi_mode(hwdev, &cur_sdi_mode);
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+ cur_sdi_mode = HINIC_SDI_MODE_BM;
+ else if (err)
+ return err;
+
+ switch (cur_sdi_mode) {
+ case HINIC_SDI_MODE_BM:
+ if (hinic_pcie_itf_id(hwdev) == 0)
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_MASTER);
+ else
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+ break;
+ case HINIC_SDI_MODE_VM:
+ if (hinic_pcie_itf_id(hwdev) == 0)
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_MASTER);
+ else
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+ break;
+ default:
+ sdk_warn(hwdev->dev_hdl, "Unknown sdi mode %d\n", cur_sdi_mode);
+ break;
+ }
+
+ return 0;
+}
+
+void detect_host_mode_pre(struct hinic_hwdev *hwdev)
+{
+ enum hinic_chip_mode chip_mode;
+
+	/* Any PF can set the HOST_MODE register, so don't trust it for
+	 * host 0; get the chip mode from the mgmt cpu for host 0 instead.
+	 * VFs have no permission to read the HOST_MODE register, so
+	 * detect the mode from the board info.
+	 */
+ if (hinic_pcie_itf_id(hwdev) == 0 ||
+ HINIC_FUNC_TYPE(hwdev) == TYPE_VF) {
+ set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+ return;
+ }
+
+ chip_mode = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+ switch (MULTI_HOST_REG_GET(chip_mode, CHIP_MODE)) {
+ case CHIP_MODE_VMGW:
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+		/* mbox has not been initialized; disable the slave host */
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+ break;
+ case CHIP_MODE_BMGW:
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+		/* mbox has not been initialized; disable the slave host */
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+ break;
+
+ default:
+ set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+ break;
+ }
+}
+
+int __mbox_to_host(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout,
+ enum hinic_mbox_ack_type ack_type)
+{
+ struct hinic_hwdev *mbox_hwdev = hwdev;
+ u8 dst_host_func_idx;
+ int err;
+
+ if (!IS_MULTI_HOST(hwdev) || HINIC_IS_VF(hwdev))
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_PF) {
+ down(&hwdev->ppf_sem);
+ mbox_hwdev = hwdev->ppf_hwdev;
+ if (!mbox_hwdev) {
+ err = -EINVAL;
+ goto release_lock;
+ }
+
+ if (!hinic_is_hwdev_mod_inited(mbox_hwdev,
+ HINIC_HWDEV_MBOX_INITED)) {
+ err = -EPERM;
+ goto release_lock;
+ }
+ }
+
+ if (!mbox_hwdev->chip_present_flag) {
+ err = -EPERM;
+ goto release_lock;
+ }
+
+ if (!get_master_host_mbox_enable(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
+ err = -EFAULT;
+ goto release_lock;
+ }
+
+ if (!mbox_hwdev->mhost_mgmt) {
+		/* send to master host by default */
+ dst_host_func_idx = 0;
+ } else {
+ dst_host_func_idx = IS_MASTER_HOST(hwdev) ?
+ mbox_hwdev->mhost_mgmt->shost_ppf_idx :
+ mbox_hwdev->mhost_mgmt->mhost_ppf_idx;
+ }
+
+ if (ack_type == MBOX_ACK)
+ err = hinic_mbox_to_host(mbox_hwdev, dst_host_func_idx,
+ mod, cmd, buf_in, in_size,
+ buf_out, out_size,
+ timeout);
+ else
+ err = hinic_mbox_to_func_no_ack(mbox_hwdev, dst_host_func_idx,
+ mod, cmd, buf_in, in_size);
+
+release_lock:
+ if (hinic_func_type(hwdev) == TYPE_PF)
+ up(&hwdev->ppf_sem);
+
+ return err;
+}
+
+int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ if (!hwdev)
+ return -EINVAL;
+
+ return __mbox_to_host((struct hinic_hwdev *)hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, timeout, MBOX_ACK);
+}
+EXPORT_SYMBOL(hinic_mbox_to_host_sync);
+
+int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size)
+{
+ return __mbox_to_host(hwdev, mod, cmd, buf_in, in_size, NULL, NULL,
+ 0, MBOX_NO_ACK);
+}
+
+static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev,
+ u16 glb_func_idx, u8 *en);
+
+int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_slave_func_nic_state *nic_state, *out_state;
+ int err;
+
+ switch (cmd) {
+ case HINIC_SW_GET_SLAVE_FUNC_NIC_STATE:
+ nic_state = buf_in;
+ out_state = buf_out;
+ *out_size = sizeof(*nic_state);
+
+ /* find nic state in ppf func_nic_en bitmap */
+ err = __get_func_nic_state_from_pf(hwdev, nic_state->func_idx,
+ &out_state->enable);
+ if (err)
+ out_state->status = 1;
+ else
+ out_state->status = 0;
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __master_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx,
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt;
+ struct register_slave_host *slave_host, *out_shost;
+ int err = 0;
+
+ if (!mhost_mgmt)
+ return -ENXIO;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER:
+ slave_host = buf_in;
+ out_shost = buf_out;
+ *out_size = sizeof(*slave_host);
+ mhost_mgmt->shost_registered = true;
+ mhost_mgmt->shost_host_idx = slave_host->host_id;
+ mhost_mgmt->shost_ppf_idx = slave_host->ppf_idx;
+
+ bitmap_copy((ulong *)out_shost->funcs_nic_en,
+ mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS);
+		sdk_info(hwdev->dev_hdl, "slave host register ppf, host_id: %d, ppf_idx: %d\n",
+ slave_host->host_id, slave_host->ppf_idx);
+
+ out_shost->status = 0;
+ break;
+ case HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER:
+ slave_host = buf_in;
+ mhost_mgmt->shost_registered = false;
+		sdk_info(hwdev->dev_hdl, "slave host unregister ppf, host_id: %d, ppf_idx: %d\n",
+ slave_host->host_id, slave_host->ppf_idx);
+
+ *out_size = sizeof(*slave_host);
+ ((struct register_slave_host *)buf_out)->status = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int __event_set_func_nic_state(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = {0};
+ struct hinic_mhost_nic_func_state nic_state = {0};
+ struct hinic_slave_func_nic_state *out_state, *func_nic_state = buf_in;
+
+ event_info.type = HINIC_EVENT_MULTI_HOST_MGMT;
+ event_info.mhost_mgmt.sub_cmd = HINIC_MHOST_NIC_STATE_CHANGE;
+ event_info.mhost_mgmt.data = &nic_state;
+
+ nic_state.func_idx = func_nic_state->func_idx;
+ nic_state.enable = func_nic_state->enable;
+
+ if (!hwdev->event_callback)
+ return -EFAULT;
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ *out_size = sizeof(*out_state);
+ out_state = buf_out;
+ out_state->status = nic_state.status;
+
+ return nic_state.status;
+}
+
+static int multi_host_event_handler(struct hinic_hwdev *hwdev,
+ u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ int err;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE:
+ err = __event_set_func_nic_state(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int sw_fwd_msg_to_vf(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_host_fwd_head *fwd_head;
+ u16 fwd_head_len;
+ void *msg;
+ int err;
+
+ fwd_head = buf_in;
+ fwd_head_len = sizeof(struct hinic_host_fwd_head);
+ msg = (void *)((u8 *)buf_in + fwd_head_len);
+ err = hinic_mbox_ppf_to_vf(hwdev, fwd_head->mod,
+ fwd_head->dst_glb_func_idx, fwd_head->cmd,
+ msg, (in_size - fwd_head_len),
+ buf_out, out_size, 0);
+ if (err)
+ nic_err(hwdev->dev_hdl,
+ "Fwd msg to func %u failed, err: %d\n",
+ fwd_head->dst_glb_func_idx, err);
+
+ return err;
+}
+
+static int __slave_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx,
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt;
+ struct hinic_slave_func_nic_state *nic_state;
+ int err = 0;
+
+ if (!mhost_mgmt)
+ return -ENXIO;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE:
+ nic_state = buf_in;
+
+ *out_size = sizeof(*nic_state);
+ ((struct hinic_slave_func_nic_state *)buf_out)->status = 0;
+
+ sdk_info(hwdev->dev_hdl, "slave func %d %s nic\n",
+ nic_state->func_idx,
+ nic_state->enable ? "register" : "unregister");
+
+ if (nic_state->enable)
+ set_bit(nic_state->func_idx, mhost_mgmt->func_nic_en);
+ else
+ clear_bit(nic_state->func_idx, mhost_mgmt->func_nic_en);
+
+ multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out,
+ out_size);
+
+ break;
+
+ case HINIC_SW_CMD_SEND_MSG_TO_VF:
+ err = sw_fwd_msg_to_vf(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+int sw_func_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ int err;
+
+ if (IS_MASTER_HOST(hwdev))
+ err = __master_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in,
+ in_size, buf_out, out_size);
+ else if (IS_SLAVE_HOST(hwdev))
+ err = __slave_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in,
+ in_size, buf_out, out_size);
+ else
+ err = -EINVAL;
+
+ if (err)
+ sdk_err(hwdev->dev_hdl, "PPF process sw funcs cmd %d failed, err: %d\n",
+ cmd, err);
+
+ return err;
+}
+
+int __ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ int err;
+
+ if (IS_SLAVE_HOST(hwdev)) {
+ err = hinic_mbox_to_host_sync(hwdev, mod, cmd,
+ buf_in, in_size, buf_out,
+ out_size, 0);
+ if (err)
+ sdk_err(hwdev->dev_hdl, "send to mpf failed, err: %d\n",
+ err);
+ } else if (IS_MASTER_HOST(hwdev)) {
+ if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_START_FLR)
+ err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in,
+ in_size);
+ else
+ err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in,
+ in_size, buf_out,
+ out_size, 0U);
+ if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ sdk_err(hwdev->dev_hdl, "PF mbox common callback handler err: %d\n",
+ err);
+ } else {
+ /* not support */
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ bool same_host = false;
+ int err = -EFAULT;
+
+ /* TODO: receive message from other host? get host id from pf_id */
+ /* modify same_host according to hinic_get_hw_pf_infos */
+
+ switch (hwdev->func_mode) {
+ case FUNC_MOD_MULTI_VM_MASTER:
+ case FUNC_MOD_MULTI_BM_MASTER:
+ if (!same_host)
+ err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+ mod, cmd, buf_in, in_size,
+ buf_out, out_size);
+ else
+			sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message in BM master\n");
+
+ break;
+ case FUNC_MOD_MULTI_VM_SLAVE:
+ case FUNC_MOD_MULTI_BM_SLAVE:
+ same_host = true;
+ if (same_host)
+ err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+ mod, cmd, buf_in, in_size,
+ buf_out, out_size);
+ else
+			sdk_warn(hwdev->dev_hdl, "Receive control message from BM master, don't support for now\n");
+
+ break;
+ default:
+ sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message\n");
+
+ break;
+ }
+
+ return err;
+}
+
+int comm_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id, HINIC_MOD_COMM,
+ cmd, buf_in, in_size, buf_out,
+ out_size);
+}
+
+void comm_ppf_to_pf_handler(void *handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+
+	sdk_err(hwdev->dev_hdl, "PF received PPF common mbox msg, not supported for now\n");
+}
+
+int hilink_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id,
+ HINIC_MOD_HILINK, cmd, buf_in,
+ in_size, buf_out, out_size);
+}
+
+int hinic_nic_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id,
+ HINIC_MOD_L2NIC, cmd, buf_in, in_size,
+ buf_out, out_size);
+}
+
+void hinic_nic_ppf_to_pf_handler(void *handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+
+	sdk_err(hwdev->dev_hdl, "PPF received another PF's l2nic mbox msg, not supported for now\n");
+}
+
+int hinic_register_slave_ppf(struct hinic_hwdev *hwdev, bool registered)
+{
+ struct register_slave_host host_info = {0};
+ u16 out_size = sizeof(host_info);
+ u8 cmd;
+ int err;
+
+ if (!IS_SLAVE_HOST(hwdev))
+ return -EINVAL;
+
+ cmd = registered ? HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER :
+ HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER;
+
+ host_info.host_id = hinic_pcie_itf_id(hwdev);
+ host_info.ppf_idx = hinic_ppf_idx(hwdev);
+
+ err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, cmd,
+ &host_info, sizeof(host_info), &host_info,
+ &out_size, 0);
+ if (err || !out_size || host_info.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to %s slave host, err: %d, out_size: 0x%x, status: 0x%x\n",
+ registered ? "register" : "unregister", err, out_size,
+ host_info.status);
+ return -EFAULT;
+ }
+ bitmap_copy(hwdev->mhost_mgmt->func_nic_en,
+ (ulong *)host_info.funcs_nic_en,
+ HINIC_MAX_FUNCTIONS);
+ return 0;
+}
+
+static int get_host_id_by_func_id(struct hinic_hwdev *hwdev, u16 func_idx,
+ u8 *host_id)
+{
+ struct hinic_hw_pf_infos *pf_infos;
+ u16 vf_id_start, vf_id_end;
+ int i;
+
+ if (!hwdev || !host_id || !hwdev->mhost_mgmt)
+ return -EINVAL;
+
+ pf_infos = &hwdev->mhost_mgmt->pf_infos;
+
+ for (i = 0; i < pf_infos->num_pfs; i++) {
+ if (func_idx == pf_infos->infos[i].glb_func_idx) {
+ *host_id = pf_infos->infos[i].itf_idx;
+ return 0;
+ }
+
+ vf_id_start = pf_infos->infos[i].glb_pf_vf_offset + 1;
+ vf_id_end = pf_infos->infos[i].glb_pf_vf_offset +
+ pf_infos->infos[i].max_vfs;
+ if (func_idx >= vf_id_start && func_idx <= vf_id_end) {
+ *host_id = pf_infos->infos[i].itf_idx;
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+int set_slave_func_nic_state(struct hinic_hwdev *hwdev, u16 func_idx, u8 en)
+{
+ struct hinic_slave_func_nic_state nic_state = {0};
+ u16 out_size = sizeof(nic_state);
+ int err;
+
+ nic_state.func_idx = func_idx;
+ nic_state.enable = en;
+
+ err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC,
+ HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE,
+ &nic_state, sizeof(nic_state), &nic_state,
+ &out_size, 0);
+ if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+		sdk_warn(hwdev->dev_hdl, "Cannot notify func %d nic state because slave host not initialized\n",
+ func_idx);
+ } else if (err || !out_size || nic_state.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set slave host functions nic state, err: %d, out_size: 0x%x, status: 0x%x\n",
+ err, out_size, nic_state.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state)
+{
+ struct hinic_hwdev *ppf_hwdev = hwdev;
+ struct hinic_multi_host_mgmt *mhost_mgmt;
+ u8 host_id = 0;
+ bool host_enable;
+ int err;
+
+ if (!hwdev || !state)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) != TYPE_PPF)
+ ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev;
+
+ if (!ppf_hwdev || !IS_MASTER_HOST(ppf_hwdev))
+ return -EINVAL;
+
+ mhost_mgmt = ppf_hwdev->mhost_mgmt;
+ if (!mhost_mgmt || state->func_idx >= HINIC_MAX_FUNCTIONS)
+ return -EINVAL;
+
+ if (state->state == HINIC_FUNC_NIC_DEL)
+ clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
+ else if (state->state == HINIC_FUNC_NIC_ADD)
+ set_bit(state->func_idx, mhost_mgmt->func_nic_en);
+ else
+ return -EINVAL;
+
+ err = get_host_id_by_func_id(ppf_hwdev, state->func_idx, &host_id);
+ if (err) {
+		sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %d host id, err: %d\n",
+ state->func_idx, err);
+ return -EFAULT;
+ }
+
+ host_enable = get_slave_host_enable(hwdev, host_id);
+	sdk_info(ppf_hwdev->dev_hdl, "Set slave host %d(status: %d) func %d %s nic\n",
+ host_id, host_enable,
+ state->func_idx, state->state ? "enable" : "disable");
+
+ if (!host_enable)
+ return 0;
+
+ /* notify slave host */
+ err = set_slave_func_nic_state(hwdev, state->func_idx, state->state);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_nic_state);
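+/* Usage sketch (hypothetical caller and function index): grant a
+ * function nic capability on the master host and propagate the change
+ * to the owning slave host.
+ *
+ *	struct hinic_func_nic_state state = {
+ *		.func_idx = 10,
+ *		.state = HINIC_FUNC_NIC_ADD,
+ *	};
+ *
+ *	err = hinic_set_func_nic_state(hwdev, &state);
+ */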
+
+static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev,
+ u16 glb_func_idx, u8 *en)
+{
+ struct hinic_multi_host_mgmt *mhost_mgmt;
+ struct hinic_hwdev *ppf_hwdev = hwdev;
+
+ if (hinic_func_type(hwdev) != TYPE_PPF)
+ ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev;
+
+ if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt)
+ return -EFAULT;
+
+ mhost_mgmt = ppf_hwdev->mhost_mgmt;
+ *en = !!(test_bit(glb_func_idx, mhost_mgmt->func_nic_en));
+
+ sdk_info(ppf_hwdev->dev_hdl, "slave host func %d nic %d\n",
+ glb_func_idx, *en);
+
+ return 0;
+}
+
+int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en)
+{
+ struct hinic_slave_func_nic_state nic_state = {0};
+ u16 out_size = sizeof(nic_state);
+ u8 nic_en;
+ int err;
+
+ if (!hwdev || !en)
+ return -EINVAL;
+
+ if (!IS_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+ *en = true;
+ return 0;
+ }
+
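+	/* a VF has no local bitmap; query the state through the mgmt channel */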
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ nic_state.func_idx = glb_func_idx;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_SW_FUNC,
+ HINIC_SW_GET_SLAVE_FUNC_NIC_STATE,
+ &nic_state, sizeof(nic_state),
+ &nic_state, &out_size, 0);
+ if (err || !out_size || nic_state.status) {
+			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Failed to get func %d nic state, err: %d, out_size: 0x%x, status: 0x%x\n",
+ glb_func_idx, err, out_size, nic_state.status);
+ return -EFAULT;
+ }
+
+ *en = !!nic_state.enable;
+
+ return 0;
+ }
+
+	/* pf in slave host should be probed in CHIP_MODE_VMGW
+ * mode for pxe install
+ */
+ if (IS_VM_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+ *en = true;
+ return 0;
+ }
+
+	/* pf/ppf get function nic state in sdk directly */
+ err = __get_func_nic_state_from_pf(hwdev, glb_func_idx, &nic_en);
+ if (err)
+ return err;
+
+ *en = !!nic_en;
+
+ return 0;
+}
+
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev))
+ return 0;
+
+ hwdev->mhost_mgmt = kzalloc(sizeof(*hwdev->mhost_mgmt), GFP_KERNEL);
+ if (!hwdev->mhost_mgmt) {
+ sdk_err(hwdev->dev_hdl, "Failed to alloc multi-host mgmt memory\n");
+ return -ENOMEM;
+ }
+
+ err = hinic_get_hw_pf_infos(hwdev, &hwdev->mhost_mgmt->pf_infos);
+ if (err)
+ goto out_free_mhost_mgmt;
+
+	/* master ppf idx is fixed to 0 */
+ hwdev->mhost_mgmt->mhost_ppf_idx = 0;
+	/* slave host ppf idx is fixed to 6 and host idx to 2 in bmgw mode
+ * TODO: get ppf_idx and host_idx according to pf_infos
+ */
+ hwdev->mhost_mgmt->shost_ppf_idx = 6;
+ hwdev->mhost_mgmt->shost_host_idx = 2;
+
+ hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_ppf_mbox_handler);
+ hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ hinic_nic_ppf_mbox_handler);
+ hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK,
+ hilink_ppf_mbox_handler);
+ hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+ sw_func_ppf_mbox_handler);
+
+ bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS);
+
+ /* Slave host:
+ * register slave host ppf functions
+ * Get function's nic state
+ */
+ if (IS_SLAVE_HOST(hwdev)) {
+		/* PXE doesn't support receiving mbox from master host */
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), true);
+ if (IS_BMGW_SLAVE_HOST(hwdev)) {
+ err = hinic_register_slave_ppf(hwdev, true);
+ if (err)
+ goto out_free_mhost_mgmt;
+ }
+ } else {
+		/* slave host can send messages to mgmt cpu after the master
+		 * mbox is set up
+ */
+ set_master_host_mbox_enable(hwdev, true);
+ }
+
+ return 0;
+
+out_free_mhost_mgmt:
+ kfree(hwdev->mhost_mgmt);
+ hwdev->mhost_mgmt = NULL;
+
+ return err;
+}
+
+int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev))
+ return 0;
+
+ if (IS_SLAVE_HOST(hwdev)) {
+ if (IS_BMGW_SLAVE_HOST(hwdev))
+ hinic_register_slave_ppf(hwdev, false);
+
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+ } else {
+ set_master_host_mbox_enable(hwdev, false);
+ }
+
+ hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK);
+ hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
+ kfree(hwdev->mhost_mgmt);
+ hwdev->mhost_mgmt = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
new file mode 100644
index 000000000000..1e79f71e912e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __HINIC_MULTI_HOST_MGMT_H_
+#define __HINIC_MULTI_HOST_MGMT_H_
+
+#define IS_BMGW_MASTER_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER)
+#define IS_BMGW_SLAVE_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE)
+#define IS_VM_MASTER_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER)
+#define IS_VM_SLAVE_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE)
+
+#define IS_MASTER_HOST(hwdev) \
+ (IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev))
+
+#define IS_SLAVE_HOST(hwdev) \
+ (IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define IS_MULTI_HOST(hwdev) \
+ (IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \
+ IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define NEED_MBOX_FORWARD(hwdev) IS_BMGW_SLAVE_HOST(hwdev)
+
+struct hinic_multi_host_mgmt {
+ struct hinic_hwdev *hwdev;
+
+ /* slave host registered */
+ bool shost_registered;
+ u8 shost_host_idx;
+ u8 shost_ppf_idx;
+
+	/* slave host functions support nic enable */
+ DECLARE_BITMAP(func_nic_en, HINIC_MAX_FUNCTIONS);
+
+ u8 mhost_ppf_idx;
+
+ struct hinic_hw_pf_infos pf_infos;
+};
+
+struct hinic_host_fwd_head {
+ unsigned short dst_glb_func_idx;
+ unsigned char dst_itf_idx;
+ unsigned char mod;
+
+ unsigned char cmd;
+ unsigned char rsv[3];
+};
+
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev);
+int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev);
+int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size);
+
+struct register_slave_host {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u8 host_id;
+ u8 ppf_idx;
+ u8 rsvd2[6];
+
+ /* for max 512 functions */
+ u64 funcs_nic_en[8];
+
+ u64 rsvd3[8];
+};
+
+struct hinic_slave_func_nic_state {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u16 func_idx;
+ u8 enable;
+ u8 rsvd1;
+
+ u32 rsvd2[2];
+};
+
+void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable);
+bool get_master_host_mbox_enable(struct hinic_hwdev *hwdev);
+void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable);
+void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode);
+int rectify_host_mode(struct hinic_hwdev *hwdev);
+void detect_host_mode_pre(struct hinic_hwdev *hwdev);
+
+int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic.h b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
new file mode 100644
index 000000000000..a09c7dadd801
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_H_
+#define HINIC_NIC_H_
+
+#include "hinic_wq.h"
+
+struct hinic_sq {
+ struct hinic_wq *wq;
+
+ u16 q_id;
+
+ u8 owner;
+
+ void *cons_idx_addr;
+
+ u8 __iomem *db_addr;
+ u16 msix_entry_idx;
+};
+
+struct hinic_rq {
+ struct hinic_wq *wq;
+
+ u16 *pi_virt_addr;
+ dma_addr_t pi_dma_addr;
+
+ u16 q_id;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+};
+
+struct hinic_qp {
+ struct hinic_sq sq;
+ struct hinic_rq rq;
+};
+
+struct vf_data_storage {
+ u8 vf_mac_addr[ETH_ALEN];
+ bool registered;
+ bool pf_set_mac;
+ u16 pf_vlan;
+ u8 pf_qos;
+ u32 max_rate;
+ u32 min_rate;
+
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+};
+
+struct hinic_nic_cfg {
+ struct semaphore cfg_lock;
+
+	/* Valid when pfc is disabled */
+ bool pause_set;
+ struct nic_pause_config nic_pause;
+
+ u8 pfc_en;
+ u8 pfc_bitmap;
+
+ struct nic_port_info port_info;
+
+ /* percentage of pf link bandwidth */
+ u32 pf_bw_limit;
+};
+
+struct hinic_nic_io {
+ struct hinic_hwdev *hwdev;
+
+ u16 global_qpn;
+ u8 link_status;
+
+ struct hinic_wqs wqs;
+
+ struct hinic_wq *sq_wq;
+ struct hinic_wq *rq_wq;
+
+ u16 max_qps;
+ u16 num_qps;
+ u16 sq_depth;
+ u16 rq_depth;
+ struct hinic_qp *qps;
+
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u16 max_vfs;
+ struct vf_data_storage *vf_infos;
+
+ struct hinic_dcb_state dcb_state;
+
+ struct hinic_nic_cfg nic_cfg;
+ u16 rx_buff_len;
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
new file mode 100644
index 000000000000..a593c2302bae
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
@@ -0,0 +1,3489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_hwif.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0.");
+
+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\
+ hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \
+ buf_in, in_size, \
+ buf_out, out_size, 0)
+
+#define l2nic_msg_to_mgmt_async(hwdev, cmd, buf_in, in_size) \
+ hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_L2NIC, cmd, buf_in, in_size)
+
+#define CPATH_FUNC_ID_VALID_LIMIT 2
+#define CHECK_IPSU_15BIT 0x8000
+
+static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value);
+
+static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_function_table *function_table;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ function_table = (struct hinic_function_table *)buf_in;
+
+ if (!function_table->rx_wqe_buf_size)
+ return false;
+
+ return true;
+}
+
+struct vf_cmd_check_handle nic_cmd_support_vf[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, NULL},
+ {HINIC_PORT_CMD_VF_UNREGISTER, NULL},
+
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL,
+ hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_INIT_FUNC, check_func_table},
+ {HINIC_PORT_CMD_SET_LLI_PRI, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL},
+ {HINIC_PORT_CMD_GET_BOOT_VERSION, NULL},
+ {HINIC_PORT_CMD_GET_MICROCODE_VERSION, NULL},
+
+ {HINIC_PORT_CMD_GET_VPORT_ENABLE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_VPORT_ENABLE, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_LRO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_PORT_INFO, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_SET_IPSU_MAC, hinic_mbox_check_func_id_10B},
+ {HINIC_PORT_CMD_GET_IPSU_MAC, hinic_mbox_check_func_id_10B},
+
+ {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_CLEAR_SQ_RES, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_SUPER_CQE, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_GET_VF_COS, NULL},
+ {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
+
+ {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
+};
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz)
+{
+ struct hinic_function_table function_table = {0};
+ u16 out_size = sizeof(function_table);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ function_table.version = HINIC_CMD_VER_FUNC_ID;
+ function_table.func_id = hinic_global_func_id(hwdev);
+ function_table.mtu = 0x3FFF; /* default, max mtu */
+ function_table.rx_wqe_buf_size = rx_buf_sz;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_INIT_FUNC,
+ &function_table, sizeof(function_table),
+ &function_table, &out_size, 0);
+ if (err || function_table.status || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, function_table.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn)
+{
+ struct hinic_cmd_qpn cmd_qpn = {0};
+ u16 out_size = sizeof(cmd_qpn);
+ int err;
+
+ if (!hwdev || !global_qpn)
+ return -EINVAL;
+
+ cmd_qpn.func_id = hinic_global_func_id(hwdev);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_GLOBAL_QPN,
+ &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn,
+ &out_size, 0);
+ if (err || !out_size || cmd_qpn.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cmd_qpn.status, out_size);
+ return -EINVAL;
+ }
+
+ *global_qpn = cmd_qpn.base_qpn;
+
+ return 0;
+}
+
+#define HINIC_VLAN_ID_MASK 0x7FFF
+
+int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_mac_set mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
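+	/* EXIST and "PF already set VF mac" statuses are tolerated below;
+	 * EXIST is only a real failure when bit 15 of vlan_id (the IPSU
+	 * flag) is set
+	 */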
+ if (err || !out_size ||
+ (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST &&
+ mac_info.status != HINIC_PF_SET_VF_ALREADY) ||
+ (mac_info.vlan_id & CHECK_IPSU_15BIT &&
+ mac_info.status == HINIC_MGMT_STATUS_EXIST)) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore set operation\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST) {
+		nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_mac);
+
+int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_mac_set mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || !out_size ||
+ (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY)) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_del_mac);
+
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,
+ u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_mac_update mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC,
+ &mac_info, sizeof(mac_info),
+ &mac_info, &out_size);
+ if (err || !out_size ||
+ (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST &&
+ mac_info.status != HINIC_PF_SET_VF_ALREADY) ||
+ (mac_info.vlan_id & CHECK_IPSU_15BIT &&
+ mac_info.status == HINIC_MGMT_STATUS_EXIST)) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF MAC. Ignore update operation\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST) {
+		nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_mac_set mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ mac_info.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC,
+ &mac_info, sizeof(mac_info),
+ &mac_info, &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(mac_addr, mac_info.mac, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_mtu mtu_info = {0};
+ u16 out_size = sizeof(mtu_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (new_mtu < HINIC_MIN_MTU_SIZE) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Invalid mtu size, mtu size < 256 bytes\n");
+ return -EINVAL;
+ }
+
+ if (new_mtu > HINIC_MAX_JUMBO_FRAME_SIZE) {
+		nic_err(nic_hwdev->dev_hdl, "Invalid mtu size, mtu size > 9600 bytes\n");
+ return -EINVAL;
+ }
+
+ mtu_info.func_id = hinic_global_func_id(hwdev);
+ mtu_info.mtu = new_mtu;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
+ &mtu_info, sizeof(mtu_info),
+ &mtu_info, &out_size);
+ if (err || !out_size || mtu_info.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mtu_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_hiovs_set_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct cmd_cpath_vlan cpath_vlan_info = {0};
+ u16 out_size = sizeof(cpath_vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cpath_vlan_info.pf_id = pf_id;
+ cpath_vlan_info.vlan_id = vlan_id;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_SET_CPATH_VLAN,
+ &cpath_vlan_info, sizeof(cpath_vlan_info),
+ &cpath_vlan_info, &out_size, 0);
+
+ if (err || !out_size || cpath_vlan_info.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to set cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cpath_vlan_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_hiovs_del_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct cmd_cpath_vlan cpath_vlan_info = {0};
+ u16 out_size = sizeof(cpath_vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cpath_vlan_info.pf_id = pf_id;
+ cpath_vlan_info.vlan_id = vlan_id;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_DEL_CPATH_VLAN,
+ &cpath_vlan_info, sizeof(cpath_vlan_info),
+ &cpath_vlan_info, &out_size, 0);
+
+ if (err || !out_size || cpath_vlan_info.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to delete cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cpath_vlan_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_config vlan_info = {0};
+ u16 out_size = sizeof(vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_VLAN,
+ &vlan_info, sizeof(vlan_info),
+ &vlan_info, &out_size);
+ if (err || !out_size || vlan_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to add vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_config vlan_info = {0};
+ u16 out_size = sizeof(vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_VLAN,
+ &vlan_info, sizeof(vlan_info),
+ &vlan_info, &out_size);
+ if (err || !out_size || vlan_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Failed to delete vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_filter vlan_filter = {0};
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_filter.func_id = hinic_global_func_id(hwdev);
+ vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
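+	/* map both the explicit unsupported status and a VF mailbox error
+	 * to HINIC_MGMT_CMD_UNSUPPORTED
+	 */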
+ if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ HINIC_IS_VF(nic_hwdev)) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.status) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_filter.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_info port_msg = {0};
+ u16 out_size = sizeof(port_msg);
+ int err;
+
+ if (!hwdev || !port_info)
+ return -EINVAL;
+
+ port_msg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO,
+ &port_msg, sizeof(port_msg),
+ &port_msg, &out_size);
+ if (err || !out_size || port_msg.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ port_info->autoneg_cap = port_msg.autoneg_cap;
+ port_info->autoneg_state = port_msg.autoneg_state;
+ port_info->duplex = port_msg.duplex;
+ port_info->port_type = port_msg.port_type;
+ port_info->speed = port_msg.speed;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_port_info);
+
+int hinic_set_autoneg(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = hinic_global_func_id(hwdev);
+ autoneg.enable = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+		nic_err(dev->dev_hdl, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_force_port_relink(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ int err;
+
+ /* Force port link down and link up */
+ err = hinic_set_port_link_status(hwdev, false);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set port link down\n");
+ return -EFAULT;
+ }
+
+ err = hinic_set_port_link_status(hwdev, true);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set port link up\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported,
+ enum hinic_link_mode *advertised)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_link_mode_cmd link_mode = {0};
+ u16 out_size = sizeof(link_mode);
+ int err;
+
+ if (!hwdev || !supported || !advertised)
+ return -EINVAL;
+
+ link_mode.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ &link_mode, sizeof(link_mode),
+ &link_mode, &out_size);
+ if (err || !out_size || link_mode.status) {
+ nic_err(dev->dev_hdl,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode.status, out_size);
+ return -EINVAL;
+ }
+
+ *supported = link_mode.supported;
+ *advertised = link_mode.advertised;
+
+ return 0;
+}
+
+int hinic_set_port_link_status(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_link_status link_status = {0};
+ u16 out_size = sizeof(link_status);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ link_status.func_id = hinic_global_func_id(hwdev);
+ link_status.enable = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_LINK_STATUS,
+ &link_status, sizeof(link_status),
+ &link_status, &out_size);
+ if (err || !out_size || link_status.status) {
+		nic_err(dev->dev_hdl, "Failed to %s port link status, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "Enable" : "Disable", err, link_status.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(void *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = hinic_global_func_id(hwdev);
+ speed_info.speed = speed;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ nic_err(dev->dev_hdl,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_speed(void *hwdev, enum nic_speed_level *speed)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev || !speed)
+ return -EINVAL;
+
+ speed_info.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EINVAL;
+ }
+
+ *speed = speed_info.speed;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_speed);
+
+int hinic_get_link_state(void *hwdev, u8 *link_state)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_get_link get_link = {0};
+ u16 out_size = sizeof(get_link);
+ int err;
+
+ if (!hwdev || !link_state)
+ return -EINVAL;
+
+ if (FUNC_FORCE_LINK_UP(hwdev)) {
+ *link_state = 1;
+ return 0;
+ }
+
+ get_link.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
+ &get_link, sizeof(get_link),
+ &get_link, &out_size);
+ if (err || !out_size || get_link.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, get_link.status, out_size);
+ return -EINVAL;
+ }
+
+ *link_state = get_link.link_status;
+
+ return 0;
+}
+
+static int hinic_set_hw_pause_info(void *hwdev,
+ struct nic_pause_config nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_pause_config pause_info = {0};
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ pause_info.func_id = hinic_global_func_id(hwdev);
+ pause_info.auto_neg = nic_pause.auto_neg;
+ pause_info.rx_pause = nic_pause.rx_pause;
+ pause_info.tx_pause = nic_pause.tx_pause;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_cfg *nic_cfg;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_cfg = &nic_hwdev->nic_io->nic_cfg;
+
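+	/* cfg_lock serializes pause and pfc updates, which share cached state */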
+ down(&nic_cfg->cfg_lock);
+
+ err = hinic_set_hw_pause_info(hwdev, nic_pause);
+ if (err) {
+ up(&nic_cfg->cfg_lock);
+ return err;
+ }
+
+ nic_cfg->pfc_en = 0;
+ nic_cfg->pfc_bitmap = 0;
+ nic_cfg->pause_set = true;
+ nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg;
+ nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause;
+ nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause;
+
+ up(&nic_cfg->cfg_lock);
+
+ return 0;
+}
+
+int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_pause_config pause_info = {0};
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev || !nic_pause)
+ return -EINVAL;
+
+ pause_info.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info.status, out_size);
+ return -EINVAL;
+ }
+
+ nic_pause->auto_neg = pause_info.auto_neg;
+ nic_pause->rx_pause = pause_info.rx_pause;
+ nic_pause->tx_pause = pause_info.tx_pause;
+
+ return 0;
+}
+
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_cfg *nic_cfg = &nic_hwdev->nic_io->nic_cfg;
+ int err = 0;
+
+ err = hinic_get_hw_pause_info(hwdev, nic_pause);
+ if (err)
+ return err;
+
+ if (nic_cfg->pause_set || !nic_pause->auto_neg) {
+ nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause;
+ nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_mode(void *hwdev, u32 enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rx_mode_config rx_mode_cfg = {0};
+ u16 out_size = sizeof(rx_mode_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ rx_mode_cfg.func_id = hinic_global_func_id(hwdev);
+ rx_mode_cfg.rx_mode = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
+ &rx_mode_cfg, sizeof(rx_mode_cfg),
+ &rx_mode_cfg, &out_size);
+ if (err || !out_size || rx_mode_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_mode_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* offload feature */
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_offload vlan_cfg = {0};
+ u16 out_size = sizeof(vlan_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_cfg.func_id = hinic_global_func_id(hwdev);
+ vlan_cfg.vlan_rx_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
+ &vlan_cfg, sizeof(vlan_cfg),
+ &vlan_cfg, &out_size);
+ if (err || !out_size || vlan_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ u16 out_size = sizeof(rx_csum_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ rx_csum_cfg.func_id = hinic_global_func_id(hwdev);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_csum_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_tx_tso(void *hwdev, u8 tso_en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_tso_config tso_cfg = {0};
+ u16 out_size = sizeof(tso_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ tso_cfg.func_id = hinic_global_func_id(hwdev);
+ tso_cfg.tso_en = tso_en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO,
+ &tso_cfg, sizeof(tso_cfg),
+ &tso_cfg, &out_size);
+ if (err || !out_size || tso_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set tso, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, tso_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num)
+{
+ struct hinic_hwdev *nic_hwdev = hwdev;
+ u8 ipv4_en = 0, ipv6_en = 0;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ nic_info(nic_hwdev->dev_hdl, "Set LRO max wqe number to %u\n", wqe_num);
+
+ err = hinic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)wqe_num);
+ if (err)
+ return err;
+
+ /* we don't set LRO timer for VF */
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ nic_info(nic_hwdev->dev_hdl, "Set LRO timer to %u\n", lro_timer);
+
+ return hinic_set_rx_lro_timer(hwdev, lro_timer);
+}
+
+static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_timer lro_timer = {0};
+ u16 out_size = sizeof(lro_timer);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ lro_timer.status = 0;
+ lro_timer.type = 0;
+ lro_timer.enable = 1;
+ lro_timer.timer = timer_value;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
+ &lro_timer, sizeof(lro_timer),
+ &lro_timer, &out_size);
+ if (lro_timer.status == 0xFF) {
+ /* For this case, we think status (0xFF) is OK */
+ lro_timer.status = 0;
+		nic_err(nic_hwdev->dev_hdl, "Set lro timer not supported by the current FW version, it will be 1ms default\n");
+ }
+
+ if (err || !out_size || lro_timer.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_timer.status, out_size);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_config lro_cfg = {0};
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ lro_cfg.func_id = hinic_global_func_id(hwdev);
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_dcb_set_hw_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_pfc pfc = {0};
+ u16 out_size = sizeof(pfc);
+ int err;
+
+ pfc.pfc_bitmap = pfc_bitmap;
+ pfc.pfc_en = pfc_en;
+ pfc.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC,
+ &pfc, sizeof(pfc), &pfc, &out_size);
+ if (err || pfc.status || !out_size) {
+		nic_err(dev->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pfc.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* dcbtool */
+int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg;
+ int err;
+
+ down(&nic_cfg->cfg_lock);
+
+ err = hinic_dcb_set_hw_pfc(hwdev, pfc_en, pfc_bitmap);
+ if (err) {
+ up(&nic_cfg->cfg_lock);
+ return err;
+ }
+
+ nic_cfg->pfc_en = pfc_en;
+ nic_cfg->pfc_bitmap = pfc_bitmap;
+
+	/* pause setting is the opposite of pfc */
+ nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1;
+ nic_cfg->nic_pause.tx_pause = pfc_en ? 0 : 1;
+
+ up(&nic_cfg->cfg_lock);
+
+ return 0;
+}
+
+int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap)
+{
+ return 0;
+}
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+ u8 *prio)
+{
+ struct hinic_up_ets_cfg ets = {0};
+ u16 out_size = sizeof(ets);
+ u16 up_bw_t = 0;
+ u8 pg_bw_t = 0;
+ int i, err;
+
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+ up_bw_t += *(up_bw + i);
+ pg_bw_t += *(pg_bw + i);
+
+ if (*(up_tc + i) > HINIC_DCB_TC_MAX) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid up %d mapping tc: %d\n",
+ i, *(up_tc + i));
+ return -EINVAL;
+ }
+ }
+
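+	/* pg bandwidths must total exactly 100; up bandwidths a multiple of 100 */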
+ if (pg_bw_t != 100 || (up_bw_t % 100) != 0) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid pg_bw: %d or up_bw: %d\n", pg_bw_t, up_bw_t);
+ return -EINVAL;
+ }
+
+ ets.port_id = 0; /* reserved */
+ memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX);
+ memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX);
+ memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.prio, prio, HINIC_DCB_UP_MAX);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS,
+ &ets, sizeof(ets), &ets, &out_size);
+ if (err || ets.status || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ets.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+ u8 *prio)
+{
+ return 0;
+}
+
+int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up)
+{
+ struct hinic_cos_up_map map = {0};
+ u16 out_size = sizeof(map);
+ int err;
+
+ if (!hwdev || !cos_up)
+ return -EINVAL;
+
+ map.port_id = hinic_physical_port_id(hwdev);
+ map.cos_valid_mask = cos_valid_bitmap;
+ memcpy(map.map, cos_up, sizeof(map.map));
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_COS_UP_MAP,
+ &map, sizeof(map), &map, &out_size);
+ if (err || map.status || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, map.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map)
+{
+ struct hinic_hwdev *dev;
+ struct hinic_nic_io *nic_io;
+ struct hinic_set_rq_iq_mapping rq_iq_mapping = {0};
+ u16 out_size = sizeof(rq_iq_mapping);
+ int err;
+
+ if (!hwdev || !map || num_rqs > HINIC_MAX_NUM_RQ)
+ return -EINVAL;
+
+ dev = hwdev;
+ nic_io = dev->nic_io;
+
+ rq_iq_mapping.func_id = hinic_global_func_id(hwdev);
+ rq_iq_mapping.num_rqs = num_rqs;
+ rq_iq_mapping.rq_depth = (u16)ilog2(nic_io->rq_depth);
+
+ memcpy(rq_iq_mapping.map, map, num_rqs);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
+ &rq_iq_mapping, sizeof(rq_iq_mapping),
+ &rq_iq_mapping, &out_size);
+ if (err || !out_size || rq_iq_mapping.status) {
+		nic_err(dev->dev_hdl, "Failed to set rq cos mapping, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rq_iq_mapping.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold)
+{
+ struct hinic_pfc_thd pfc_thd = {0};
+ u16 out_size = sizeof(pfc_thd);
+ int err;
+
+ if (op_type == HINIC_PFC_SET_FUNC_THD)
+ pfc_thd.func_thd = threshold;
+ else if (op_type == HINIC_PFC_SET_GLB_THD)
+ pfc_thd.glb_thd = threshold;
+ else
+ return -EINVAL;
+
+ pfc_thd.func_id = hinic_global_func_id(hwdev);
+ pfc_thd.op_type = op_type;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC_THD,
+ &pfc_thd, sizeof(pfc_thd),
+ &pfc_thd, &out_size);
+ if (err || !out_size || pfc_thd.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set pfc threshold, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pfc_thd.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_set_bp_thd(void *hwdev, u16 threshold)
+{
+ int err;
+
+ err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_GLB_THD, threshold);
+ if (err) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set global threshold\n");
+ return -EFAULT;
+ }
+
+ err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, threshold);
+ if (err) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set function threshold\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_disable_fw_bp(void *hwdev)
+{
+ int err;
+
+ err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, 0);
+ if (err) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to disable ucode backpressure\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cmd_enable_iq *iq_info;
+ struct hinic_cmd_buf *cmd_buf;
+ int err;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(dev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ iq_info = cmd_buf->buf;
+ cmd_buf->size = sizeof(*iq_info);
+
+ iq_info->force_en = 0;
+ iq_info->rq_depth = (u8)ilog2(dev->nic_io->rq_depth);
+ iq_info->num_rq = (u8)dev->nic_io->max_qps;
+	/* num_qps will not be larger than 64 */
+ iq_info->glb_rq_id = dev->nic_io->global_qpn + q_id;
+ iq_info->q_id = q_id;
+ iq_info->lower_thd = lower_thd;
+ iq_info->prod_idx = prod_idx;
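+	/* convert the iq info to big-endian before posting it to the cmdq */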
+ hinic_cpu_to_be32(iq_info, sizeof(*iq_info));
+
+ err = hinic_cmdq_async(hwdev, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE, cmd_buf);
+ if (err) {
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+ nic_err(dev->dev_hdl, "Failed to set iq enable, err:%d\n", err);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cmd_enable_iq_mgmt iq_info = {0};
+ int err;
+
+ iq_info.force_en = 0;
+
+ iq_info.rq_depth = (u8)ilog2(dev->nic_io->rq_depth);
+ iq_info.num_rq = (u8)dev->nic_io->max_qps;
+	/* num_qps will not be larger than 64 */
+ iq_info.glb_rq_id = dev->nic_io->global_qpn + q_id;
+ iq_info.q_id = q_id;
+ iq_info.lower_thd = lower_thd;
+ iq_info.prod_idx = prod_idx;
+
+ err = l2nic_msg_to_mgmt_async(hwdev, HINIC_PORT_CMD_SET_IQ_ENABLE,
+ &iq_info, sizeof(iq_info));
+ if (err || iq_info.status) {
+		nic_err(dev->dev_hdl, "Failed to set iq enable for rq:%d, err: %d, status: 0x%x\n",
+ q_id, err, iq_info.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* nictool */
+int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period)
+{
+ return 0;
+}
+
+int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *cfg)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_config lro_cfg = {0};
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev || !cfg)
+ return -EINVAL;
+
+ lro_cfg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ cfg->func_id = lro_cfg.func_id;
+ cfg->lro_ipv4_en = lro_cfg.lro_ipv4_en;
+ cfg->lro_ipv6_en = lro_cfg.lro_ipv6_en;
+ cfg->lro_max_wqe_num = lro_cfg.lro_max_wqe_num;
+ return 0;
+}
+
+int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size)
+{
+ return 0;
+}
+
+int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size)
+{
+ return 0;
+}
+
+int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable)
+{
+ struct hinic_port_loopback lb = {0};
+ u16 out_size = sizeof(lb);
+ int err;
+
+ lb.mode = mode;
+ lb.en = enable;
+
+ if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid loopback mode %d to set\n", mode);
+ return -EINVAL;
+ }
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+ &lb, sizeof(lb), &lb, &out_size);
+ if (err || !out_size || lb.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n",
+ mode, enable, err, lb.status, out_size);
+ return -EINVAL;
+ }
+
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Set loopback mode %d en %d succeed\n", mode, enable);
+
+ return 0;
+}
+
+int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable)
+{
+ struct hinic_port_loopback lb = {0};
+ u16 out_size = sizeof(lb);
+ int err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LOOPBACK_MODE,
+ &lb, sizeof(lb), &lb, &out_size);
+ if (err || !out_size || lb.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get loopback mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lb.status, out_size);
+ return -EINVAL;
+ }
+
+ *mode = lb.mode;
+ *enable = lb.en;
+ return 0;
+}
+
+int hinic_set_loopback_mode(void *hwdev, bool enable)
+{
+ return hinic_set_loopback_mode_ex(hwdev, HINIC_INTERNAL_LP_MODE,
+ enable);
+}
+
+int hinic_get_port_enable_state(void *hwdev, bool *enable)
+{
+ return 0;
+}
+
+int hinic_get_vport_enable_state(void *hwdev, bool *enable)
+{
+ return 0;
+}
+
+int hinic_set_lli_state(void *hwdev, u8 lli_state)
+{
+ return 0;
+}
+
+int hinic_set_vport_enable(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vport_state en_state = {0};
+ u16 out_size = sizeof(en_state);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ en_state.func_id = hinic_global_func_id(hwdev);
+ en_state.state = enable ? 1 : 0;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VPORT_ENABLE,
+ &en_state, sizeof(en_state),
+ &en_state, &out_size);
+ if (err || !out_size || en_state.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, en_state.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_vport_enable);
+
+#define NIC_PORT_DISABLE 0x0
+#define NIC_PORT_ENABLE 0x3
+int hinic_set_port_enable(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_state en_state = {0};
+ u16 out_size = sizeof(en_state);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (HINIC_IS_VF(nic_hwdev))
+ return 0;
+
+ en_state.version = HINIC_CMD_VER_FUNC_ID;
+ en_state.func_id = hinic_global_func_id(hwdev);
+ en_state.state = enable ? NIC_PORT_ENABLE : NIC_PORT_DISABLE;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE,
+ &en_state, sizeof(en_state), &en_state,
+ &out_size);
+ if (err || !out_size || en_state.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, en_state.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_port_enable);
+
+/* rss */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct nic_rss_context_tbl *ctx_tbl;
+ struct hinic_cmd_buf *cmd_buf;
+ u32 ctx = 0;
+ u64 out_param;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
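+	/* pack the valid flag and all enabled hash types into one 32-bit context */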
+ ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+ cmd_buf->size = sizeof(struct nic_rss_context_tbl);
+
+ ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf;
+ ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+ ctx_tbl->offset = 0;
+ ctx_tbl->size = sizeof(u32);
+ ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+ ctx_tbl->rsvd = 0;
+ ctx_tbl->ctx = cpu_to_be32(ctx);
+
+ /* cfg the rss context table by command queue */
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ cmd_buf, &out_param, 0);
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ if (err || out_param != 0) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rss context table, err: %d\n",
+ err);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_context_table ctx_tbl = {0};
+ u16 out_size = sizeof(ctx_tbl);
+ int err;
+
+ if (!hwdev || !rss_type)
+ return -EINVAL;
+
+ ctx_tbl.func_id = hinic_global_func_id(hwdev);
+ ctx_tbl.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ &ctx_tbl, sizeof(ctx_tbl),
+ &ctx_tbl, &out_size);
+ if (err || !out_size || ctx_tbl.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ctx_tbl.status, out_size);
+ return -EINVAL;
+ }
+
+ rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+ rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+ rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+ rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+ rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+ rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context,
+ TCP_IPV6_EXT);
+ rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+ rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+ return 0;
+}
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_template_key temp_key = {0};
+ u16 out_size = sizeof(temp_key);
+ int err;
+
+ if (!hwdev || !temp)
+ return -EINVAL;
+
+ temp_key.func_id = hinic_global_func_id(hwdev);
+ temp_key.template_id = (u8)tmpl_idx;
+ memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, temp_key.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_template_key temp_key = {0};
+ u16 out_size = sizeof(temp_key);
+ int err;
+
+ if (!hwdev || !temp)
+ return -EINVAL;
+
+ temp_key.func_id = hinic_global_func_id(hwdev);
+ temp_key.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, temp_key.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_engine_type hash_type = {0};
+ u16 out_size = sizeof(hash_type);
+ int err;
+
+ if (!hwdev || !type)
+ return -EINVAL;
+
+ hash_type.func_id = hinic_global_func_id(hwdev);
+ hash_type.template_id = tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ &hash_type, sizeof(hash_type),
+ &hash_type, &out_size);
+ if (err || !out_size || hash_type.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, hash_type.status, out_size);
+ return -EINVAL;
+ }
+
+ *type = hash_type.hash_engine;
+ return 0;
+}
+
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_engine_type hash_type = {0};
+ u16 out_size = sizeof(hash_type);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hash_type.func_id = hinic_global_func_id(hwdev);
+ hash_type.hash_engine = type;
+ hash_type.template_id = tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ &hash_type, sizeof(hash_type),
+ &hash_type, &out_size);
+ if (err || !out_size || hash_type.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, hash_type.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
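+/* Write the 256-entry RSS indirection table of a template: entries are
+ * packed four per 32-bit word and converted to big endian for the microcode,
+ * then sent through the command queue in two halves of
+ * HINIC_RSS_INDIR_SIZE / 2 entries each.
+ */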
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct nic_rss_indirect_tbl *indir_tbl;
+ struct hinic_cmd_buf *cmd_buf;
+ int i;
+ u32 *temp;
+ u32 indir_size;
+ u64 out_param;
+ int err;
+
+ if (!hwdev || !indir_table)
+ return -EINVAL;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = sizeof(struct nic_rss_indirect_tbl);
+
+ indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf;
+ indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ indir_tbl->entry[i] = (u8)(*(indir_table + i));
+
+		if ((i & 0x3) == 0x3) {
+ temp = (u32 *)&indir_tbl->entry[i - 3];
+ *temp = cpu_to_be32(*temp);
+ }
+ }
+
+	/* configure the RSS indirection table through the command queue */
+ indir_size = HINIC_RSS_INDIR_SIZE / 2;
+ indir_tbl->offset = 0;
+ indir_tbl->size = cpu_to_be32(indir_size);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ goto free_buf;
+ }
+
+ indir_tbl->offset = cpu_to_be32(indir_size);
+ indir_tbl->size = cpu_to_be32(indir_size);
+ memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+free_buf:
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_indir_table rss_cfg = {0};
+ u16 out_size = sizeof(rss_cfg);
+ int err = 0, i;
+
+	if (!hwdev || !indir_table)
+		return -EINVAL;
+
+	rss_cfg.func_id = hinic_global_func_id(hwdev);
+ rss_cfg.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+ &out_size);
+ if (err || !out_size || rss_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_table[i] = rss_cfg.indir[i];
+
+ return 0;
+}
+
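+/* Enable or disable RSS for a template. The microcode requires the TC count
+ * to be a power of two and takes it in log2 form (rq_priority_number).
+ */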
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_config rss_cfg = {0};
+ u16 out_size = sizeof(rss_cfg);
+ int err;
+
+	/* microcode requires the number of TCs to be a power of 2 */
+ if (!hwdev || !prio_tc || (tc_num & (tc_num - 1)))
+ return -EINVAL;
+
+ rss_cfg.func_id = hinic_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.template_id = tmpl_idx;
+ rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0;
+
+ memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG,
+ &rss_cfg, sizeof(rss_cfg),
+ &rss_cfg, &out_size);
+ if (err || !out_size || rss_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats)
+{
+ struct hinic_port_stats_info stats_info = {0};
+ struct hinic_cmd_vport_stats vport_stats = {0};
+ u16 out_size = sizeof(vport_stats);
+ int err;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.func_id = hinic_global_func_id(hwdev);
+ stats_info.stats_size = sizeof(vport_stats);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+ &stats_info, sizeof(stats_info),
+ &vport_stats, &out_size);
+ if (err || !out_size || vport_stats.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vport_stats.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &vport_stats.stats, sizeof(*stats));
+
+ return 0;
+}
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats)
+{
+ struct hinic_port_stats *port_stats;
+ struct hinic_port_stats_info stats_info = {0};
+ u16 out_size = sizeof(*port_stats);
+ int err;
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats)
+ return -ENOMEM;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.stats_size = sizeof(*port_stats);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+ &stats_info, sizeof(stats_info),
+ port_stats, &out_size);
+ if (err || !out_size || port_stats->status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_stats->status, out_size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(stats, &port_stats->stats, sizeof(*stats));
+
+out:
+ kfree(port_stats);
+
+ return err;
+}
+
+int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_version_info up_ver = {0};
+ u16 out_size;
+ int err;
+
+ out_size = sizeof(up_ver);
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+ &up_ver, sizeof(up_ver), &up_ver,
+ &out_size);
+ if (err || !out_size || up_ver.status) {
+		nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, up_ver.status, out_size);
+ return -EINVAL;
+ }
+
+ snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver);
+
+ return 0;
+}
+
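+/* Collect the management, boot and microcode firmware versions with three
+ * separate management commands and copy them into the caller's structure.
+ */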
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_version_info ver_info = {0};
+ u16 out_size = sizeof(ver_info);
+ int err;
+
+ if (!hwdev || !fw_ver)
+ return -EINVAL;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+ &ver_info, sizeof(ver_info), &ver_info,
+ &out_size);
+ if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ver_info.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+ out_size = sizeof(ver_info);
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION,
+ &ver_info, sizeof(ver_info), &ver_info,
+ &out_size);
+ if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get boot version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ver_info.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+ out_size = sizeof(ver_info);
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+ &ver_info, sizeof(ver_info), &ver_info,
+ &out_size);
+ if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ver_info.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_fw_version);
+
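+/* Ask the management CPU to allocate a free RSS template; the allocated
+ * template id is returned through tmpl_idx.
+ */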
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_template_mgmt template_mgmt = {0};
+ u16 out_size = sizeof(template_mgmt);
+ int err;
+
+ if (!hwdev || !tmpl_idx)
+ return -EINVAL;
+
+ template_mgmt.func_id = hinic_global_func_id(hwdev);
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ *tmpl_idx = template_mgmt.template_id;
+
+ return 0;
+}
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rss_template_mgmt template_mgmt = {0};
+ u16 out_size = sizeof(template_mgmt);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ template_mgmt.func_id = hinic_global_func_id(hwdev);
+ template_mgmt.template_id = tmpl_idx;
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_port_funcs_state(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_funcs_state state = {0};
+ u16 out_size = sizeof(state);
+ int err = 0;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ state.func_id = hinic_global_func_id(hwdev);
+ state.drop_en = enable ? 0 : 1;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_FUNCS_STATE,
+ &state, sizeof(state), &state, &out_size);
+ if (err || !out_size || state.status) {
+		nic_err(dev->dev_hdl, "Failed to %s all functions in port, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, state.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_reset_port_link_cfg(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_reset_link_cfg reset_cfg = {0};
+ u16 out_size = sizeof(reset_cfg);
+ int err;
+
+ reset_cfg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG,
+ &reset_cfg, sizeof(reset_cfg),
+ &reset_cfg, &out_size);
+ if (err || !out_size || reset_cfg.status) {
+		nic_err(dev->dev_hdl, "Failed to reset port link configuration, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, reset_cfg.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac)
+{
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev || !mac)
+ return -EINVAL;
+
+ nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ memcpy(nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].vf_mac_addr, mac,
+ ETH_ALEN);
+
+ return 0;
+}
+
+static int hinic_change_vf_mtu_msg_handler(struct hinic_hwdev *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_CHANGE_MTU, buf_in, in_size,
+ buf_out, out_size, 0);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to set VF %u mtu\n", vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic_get_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ struct hinic_port_mac_set *mac_info = buf_out;
+ int err;
+
+ if (nic_io->hwdev->func_mode == FUNC_MOD_MULTI_BM_SLAVE) {
+ err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_MAC, buf_in,
+ in_size, buf_out, out_size, 0);
+ return err;
+ }
+
+ memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+ mac_info->status = 0;
+ *out_size = sizeof(*mac_info);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ struct hinic_port_mac_set *mac_in = buf_in;
+ struct hinic_port_mac_set *mac_out = buf_out;
+ int err;
+
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF %d MAC address\n",
+ HW_VF_ID_TO_OS(vf));
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_MAC, buf_in, in_size,
+ buf_out, out_size, 0);
+ if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_del_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ struct hinic_port_mac_set *mac_in = buf_in;
+ struct hinic_port_mac_set *mac_out = buf_out;
+ int err;
+
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
+ !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+ nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_DEL_MAC, buf_in, in_size,
+ buf_out, out_size, 0);
+ if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_update_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ struct hinic_port_mac_update *mac_in = buf_in;
+ struct hinic_port_mac_update *mac_out = buf_out;
+ int err;
+
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->new_mac)) {
+ nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_UPDATE_MAC, buf_in,
+ in_size, buf_out, out_size, 0);
+ if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "Failed to update VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+/*lint -save -e734*/
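+/* Add or clear a VF VLAN filter on behalf of the PF. Clearing VLAN 0 is a
+ * no-op because VLAN 0 must always stay configured.
+ */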
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+ u8 qos, int vf_id)
+{
+ struct hinic_vf_vlan_config vf_vlan = {0};
+ u8 cmd;
+ u16 out_size = sizeof(vf_vlan);
+ int err;
+
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ vf_vlan.vlan_id = vid;
+ vf_vlan.qos = qos;
+
+ if (add)
+ cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+ else
+ cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vf_vlan, sizeof(vf_vlan),
+ &vf_vlan, &out_size);
+ if (err || !out_size || vf_vlan.status) {
+		nic_err(hwdev->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*lint -restore*/
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 func_id;
+ int err = 0;
+
+ vf_info = hwdev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac) {
+ func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, 0, func_id);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to set VF %d MAC\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+ if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+ err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+ vf_info->pf_qos, vf_id);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to add VF %d VLAN_QOS\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+			nic_err(hwdev->dev_hdl, "Failed to set VF %d max rate %d, min rate %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct hinic_register_vf *register_info = buf_out;
+ int err;
+
+ if (vf_id > nic_io->max_vfs) {
+		nic_err(hw_dev->dev_hdl, "Register VF id %d exceeds limit [0-%d]\n",
+ HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+ register_info->status = EFAULT;
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*register_info);
+ err = hinic_init_vf_config(hw_dev, vf_id);
+ if (err) {
+ register_info->status = EFAULT;
+ return err;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+ return 0;
+}
+
+void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+
+ if (vf_id > nic_io->max_vfs)
+ return;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+}
+
+static void hinic_get_vf_link_status_msg_handler(struct hinic_nic_io *nic_io,
+ u16 vf_id, void *buf_out,
+ u16 *out_size)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ struct hinic_get_link *get_link = buf_out;
+ bool link_forced, link_up;
+
+ link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+ link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+ if (link_forced)
+ get_link->link_status = link_up ?
+ HINIC_LINK_UP : HINIC_LINK_DOWN;
+ else
+ get_link->link_status = nic_io->link_status;
+
+ get_link->status = 0;
+ *out_size = sizeof(*get_link);
+}
+
+static void hinic_get_vf_cos_msg_handler(struct hinic_nic_io *nic_io,
+ u16 vf_id, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_vf_dcb_state *dcb_state = buf_out;
+
+ memcpy(&dcb_state->state, &nic_io->dcb_state,
+ sizeof(nic_io->dcb_state));
+
+ dcb_state->status = 0;
+ *out_size = sizeof(*dcb_state);
+}
+
+/* PF receives and dispatches mailbox messages from its VFs */
+int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ u8 size = sizeof(nic_cmd_support_vf) / sizeof(nic_cmd_support_vf[0]);
+ struct hinic_nic_io *nic_io;
+ int err = 0;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd,
+ buf_in, in_size, size)) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"PF received invalid VF nic cmd(0x%x), mbox len(0x%x)\n",
+ cmd, in_size);
+ err = HINIC_MBOX_VF_CMD_ERROR;
+ return err;
+ }
+
+ nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ switch (cmd) {
+ case HINIC_PORT_CMD_VF_REGISTER:
+ err = hinic_register_vf_msg_handler(hwdev, vf_id, buf_out,
+ out_size);
+ break;
+
+ case HINIC_PORT_CMD_VF_UNREGISTER:
+ *out_size = 0;
+ hinic_unregister_vf_msg_handler(hwdev, vf_id);
+ break;
+
+ case HINIC_PORT_CMD_CHANGE_MTU:
+ err = hinic_change_vf_mtu_msg_handler(hwdev, vf_id, buf_in,
+ in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_PORT_CMD_GET_MAC:
+ hinic_get_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+ in_size, buf_out, out_size);
+ break;
+
+ case HINIC_PORT_CMD_SET_MAC:
+ err = hinic_set_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+ in_size, buf_out, out_size);
+ break;
+
+ case HINIC_PORT_CMD_DEL_MAC:
+ err = hinic_del_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+ in_size, buf_out, out_size);
+ break;
+
+ case HINIC_PORT_CMD_UPDATE_MAC:
+ err = hinic_update_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+ in_size, buf_out,
+ out_size);
+ break;
+
+ case HINIC_PORT_CMD_GET_LINK_STATE:
+ hinic_get_vf_link_status_msg_handler(nic_io, vf_id, buf_out,
+ out_size);
+ break;
+
+ case HINIC_PORT_CMD_GET_VF_COS:
+ hinic_get_vf_cos_msg_handler(nic_io, vf_id, buf_out, out_size);
+ break;
+
+ default:
+ /* pass through */
+ err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+ cmd, buf_in, in_size,
+ buf_out, out_size, 0);
+
+ break;
+ }
+
+ if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+		nic_err(nic_io->hwdev->dev_hdl, "PF failed to process VF L2NIC cmd: %d, err: %d\n",
+ cmd, err);
+ return err;
+}
+
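+/* Initialize the link-state bookkeeping of one VF from the set_vf_link_state
+ * module parameter; functions in OVS mode or with link forced up always
+ * report the VF link as forced up.
+ */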
+static int hinic_init_vf_infos(struct hinic_nic_io *nic_io, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 vf_link_state;
+
+ if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+		nic_warn(nic_io->hwdev->dev_hdl, "Module parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+ set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+ set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+ }
+
+ vf_link_state = hinic_support_ovs(nic_io->hwdev, NULL) ?
+ HINIC_IFLA_VF_LINK_STATE_ENABLE : set_vf_link_state;
+
+ if (FUNC_FORCE_LINK_UP(nic_io->hwdev))
+ vf_link_state = HINIC_IFLA_VF_LINK_STATE_ENABLE;
+
+ switch (vf_link_state) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[vf_id].link_forced = false;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = true;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = false;
+ break;
+ default:
+		nic_err(nic_io->hwdev->dev_hdl, "Input parameter set_vf_link_state error: %d\n",
+ vf_link_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
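+/* Per-VF hardware setup done when SR-IOV is enabled: program the fixed
+ * default work queue page size for every VF in the given range.
+ */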
+int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+ u16 i, func_idx;
+ int err;
+
+	/* VFs use 256K as the default WQ page size and cannot change it */
+ for (i = start_vf_id; i <= end_vf_id; i++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev) + i;
+ err = hinic_set_wq_page_size(hwdev, func_idx,
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+ u16 func_idx, idx;
+
+ for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev) + idx;
+ hinic_set_wq_page_size(hwdev, func_idx, HINIC_HW_WQ_PAGE_SIZE);
+
+ hinic_clear_vf_infos(hwdev, idx);
+ }
+
+ return 0;
+}
+
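+/* NIC L2 initialization: a VF registers itself with its PF through the
+ * mailbox, while a PF allocates the vf_infos array for all possible VFs and
+ * registers the mailbox handler that services VF requests.
+ */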
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_nic_io *nic_io;
+ int err = 0;
+ struct hinic_register_vf register_info = {0};
+ u32 size;
+ u16 i, out_size = sizeof(register_info);
+
+ hwdev->nic_io = kzalloc(sizeof(*hwdev->nic_io), GFP_KERNEL);
+ if (!hwdev->nic_io)
+ return -ENOMEM;
+
+ nic_io = hwdev->nic_io;
+ nic_io->hwdev = hwdev;
+
+ sema_init(&nic_io->nic_cfg.cfg_lock, 1);
+
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER,
+				       &register_info, sizeof(register_info),
+				       &register_info, &out_size, 0);
+ if (err || register_info.status || !out_size) {
+			nic_err(hwdev->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ err = -EIO;
+ goto out_free_nic_io;
+ }
+ } else {
+ nic_io->max_vfs = hinic_func_max_vf(hwdev);
+ size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+ if (size != 0) {
+ nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+ if (!nic_io->vf_infos) {
+ err = -ENOMEM;
+ goto out_free_nic_io;
+ }
+
+ for (i = 0; i < nic_io->max_vfs; i++) {
+ err = hinic_init_vf_infos(nic_io, i);
+ if (err)
+ goto init_vf_infos_err;
+ }
+
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_pf_mbox_handler);
+ if (err)
+ goto register_pf_mbox_cb_err;
+ }
+ }
+
+ return 0;
+
+register_pf_mbox_cb_err:
+init_vf_infos_err:
+ kfree(nic_io->vf_infos);
+
+out_free_nic_io:
+ sema_deinit(&hwdev->nic_io->nic_cfg.cfg_lock);
+ kfree(hwdev->nic_io);
+ hwdev->nic_io = NULL;
+
+ return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+			nic_err(hwdev->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->nic_io->vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->nic_io->vf_infos);
+ }
+ }
+
+ sema_deinit(&hwdev->nic_io->nic_cfg.cfg_lock);
+
+ kfree(hwdev->nic_io);
+ hwdev->nic_io = NULL;
+}
+
+/*lint -save -e734*/
+/* This function must only be called by hinic_ndo_set_vf_mac; other callers
+ * are not permitted.
+ */
+int hinic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ u16 func_id;
+ int err;
+
+ /* duplicate request, so just return success */
+ if (vf_info->pf_set_mac &&
+ !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ vf_info->pf_set_mac = true;
+
+ func_id = hinic_glb_pf_vf_offset(hw_dev) + vf;
+ err = hinic_update_mac(hw_dev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err) {
+ vf_info->pf_set_mac = false;
+ return err;
+ }
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hw_dev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ nic_info(hw_dev->dev_hdl, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+int hinic_kill_vf_vlan(void *hwdev, int vf_id)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hw_dev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id);
+ if (err)
+ return err;
+
+ nic_info(hw_dev->dev_hdl, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ u16 pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ u8 pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ u16 vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
+
+/*lint -restore*/
+
+bool hinic_vf_is_registered(void *hwdev, u16 vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered;
+}
+
+void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+#else
+ ivi->tx_rate = vfinfo->max_rate;
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+#endif
+}
+
+void hinic_clear_vf_infos(void *hwdev, u16 vf_id)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct vf_data_storage *vf_infos;
+ u16 func_id;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ vf_infos = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_del_mac(hwdev, vf_infos->vf_mac_addr, 0, func_id);
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id))
+ hinic_kill_vf_vlan(hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(hwdev, vf_id, 0, 0);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* set vf_infos to default */
+ hinic_init_vf_infos(hw_dev->nic_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct hinic_port_link_status link = {0};
+ struct vf_data_storage *vf_infos = hwdev->nic_io->vf_infos;
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ nic_err(hwdev->dev_hdl,
+				"Failed to send link change event to VF %d, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* send link change event mbox msg to active vfs under the pf */
+void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link_status)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(nic_io->hwdev, i,
+ link_status);
+ }
+}
+
+void hinic_save_pf_link_status(void *hwdev, u8 link_status)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ nic_io->link_status = link_status;
+}
+
+int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+static int hinic_set_vf_rate_limit(void *hwdev, u16 vf_id, u32 tx_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg,
+ sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if (err || !out_size || rate_cfg.status) {
+		nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate_max_min(void *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+		nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
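+/* Configure a VF's tx rate. Firmware that supports separate max/min rates is
+ * tried first; if that command is unsupported, fall back to the plain rate
+ * limit command, which can only enforce the max rate.
+ */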
+int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+		nic_err(hw_dev->dev_hdl, "Current firmware doesn't support setting min tx rate\n");
+ return -EINVAL;
+ }
+
+	nic_info(hw_dev->dev_hdl, "Current firmware doesn't support setting min tx rate, forcing min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io;
+ struct vf_data_storage *vf_infos;
+ struct hinic_vf_dcb_state vf_dcb = {0};
+ u16 vf_id, out_size = 0;
+ int err;
+
+ if (!hwdev || !dcb_state || !hw_dev->nic_io)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ if (!memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state)))
+ return 0;
+
+ memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state));
+	/* save in the SDK; VFs will get the DCB state when probing */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+	/* notify the stateful services in the PF, then notify all VFs */
+ hinic_notify_dcb_state_event(hwdev, dcb_state);
+
+	/* VFs not supported, no need to notify them */
+ if (!nic_io->vf_infos)
+ return 0;
+
+ vf_infos = nic_io->vf_infos;
+ for (vf_id = 0; vf_id < nic_io->max_vfs; vf_id++) {
+ if (vf_infos[vf_id].registered) {
+ vf_dcb.status = 0;
+ out_size = sizeof(vf_dcb);
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ OS_VF_ID_TO_HW(vf_id),
+ HINIC_PORT_CMD_SET_VF_COS,
+ &vf_dcb, sizeof(vf_dcb), &vf_dcb,
+ &out_size, 0);
+ if (err || vf_dcb.status || !out_size)
+ nic_err(hw_dev->dev_hdl,
+					"Failed to notify dcb state to VF %d, err: %d, status: 0x%x, out size: 0x%x\n",
+ vf_id, err, vf_dcb.status, out_size);
+ }
+ }
+
+ return 0;
+}
+
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ memcpy(dcb_state, &nic_io->dcb_state, sizeof(*dcb_state));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_dcb_state);
+
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ if (!hwdev->nic_io)
+ return -EINVAL;
+
+ nic_io = hwdev->nic_io;
+ memcpy(&nic_io->dcb_state, dcb_state, sizeof(*dcb_state));
+
+ return 0;
+}
+
+int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_vf_dcb_state vf_dcb = {0};
+ u16 out_size = sizeof(vf_dcb);
+ int err;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+		nic_err(hw_dev->dev_hdl, "Only a VF needs to get the pf dcb state\n");
+ return -EINVAL;
+ }
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_VF_COS, &vf_dcb,
+ sizeof(vf_dcb), &vf_dcb,
+ &out_size, 0);
+ if (err || !out_size || vf_dcb.status) {
+		nic_err(hw_dev->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vf_dcb.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state));
+	/* Save dcb_state in hw for the stateful module */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_pf_dcb_state);
+
+int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_ipsu_mac mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ mac_info.index = index;
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_IPSU_MAC,
+ &mac_info, sizeof(mac_info), &mac_info,
+ &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Failed to set IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n",
+ index, err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id,
+ u16 *func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_ipsu_mac mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ mac_info.index = index;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_IPSU_MAC,
+ &mac_info, sizeof(mac_info), &mac_info,
+ &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+			"Failed to get IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n",
+ index, err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+ *func_id = mac_info.func_id;
+ *vlan_id = mac_info.vlan_id;
+ memcpy(mac_addr, mac_info.mac, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_set_anti_attack(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_anti_attack_rate rate = {0};
+ u16 out_size = sizeof(rate);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ rate.func_id = hinic_global_func_id(hwdev);
+ rate.enable = enable;
+ rate.cir = ANTI_ATTACK_DEFAULT_CIR;
+ rate.xir = ANTI_ATTACK_DEFAULT_XIR;
+ rate.cbs = ANTI_ATTACK_DEFAULT_CBS;
+ rate.xbs = ANTI_ATTACK_DEFAULT_XBS;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE,
+ &rate, sizeof(rate), &rate,
+ &out_size);
+ if (err || !out_size || rate.status) {
+		nic_err(nic_hwdev->dev_hdl, "Can't %s port Anti-Attack rate limit, err: %d, status: 0x%x, out size: 0x%x\n",
+ (enable ? "enable" : "disable"), err, rate.status,
+ out_size);
+ return -EINVAL;
+ }
+
+	nic_info(nic_hwdev->dev_hdl, "%s port Anti-Attack rate limit succeeded.\n",
+ (enable ? "Enable" : "Disable"));
+
+ return 0;
+}
+
+int hinic_flush_sq_res(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_clear_sq_resource sq_res = {0};
+ u16 out_size = sizeof(sq_res);
+ int err;
+
+ sq_res.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_SQ_RES,
+ &sq_res, sizeof(sq_res), &sq_res,
+ &out_size);
+ if (err || !out_size || sq_res.status) {
+		nic_err(dev->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, sq_res.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level);
+
+int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg;
+ int err = 0;
+
+ down(&nic_cfg->cfg_lock);
+
+	/* Enabling PFC will disable pause */
+ if (nic_cfg->pfc_en) {
+ err = hinic_dcb_set_hw_pfc(hwdev, nic_cfg->pfc_en,
+ nic_cfg->pfc_bitmap);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pfc\n");
+
+ } else if (!port_info->autoneg_state || nic_cfg->pause_set) {
+ nic_cfg->nic_pause.auto_neg = port_info->autoneg_state;
+ err = hinic_set_hw_pause_info(hwdev, nic_cfg->nic_pause);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pause\n");
+ }
+
+ if (FUNC_SUPPORT_RATE_LIMIT(hwdev)) {
+ err = __set_pf_bw(hwdev, port_info->speed);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pf bandwidth limit\n");
+ }
+
+ up(&nic_cfg->cfg_lock);
+
+ return err;
+}
+
+int hinic_set_super_cqe_state(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_super_cqe super_cqe = {0};
+ u16 out_size = sizeof(super_cqe);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ super_cqe.func_id = hinic_global_func_id(hwdev);
+ super_cqe.super_cqe_en = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SUPER_CQE,
+ &super_cqe, sizeof(super_cqe), &super_cqe,
+ &out_size);
+ if (err || !out_size || super_cqe.status) {
+		nic_err(nic_hwdev->dev_hdl, "Can't %s super cqe, err: %d, status: 0x%x, out size: 0x%x\n",
+ (enable ? "enable" : "disable"), err, super_cqe.status,
+ out_size);
+ return -EINVAL;
+ }
+
+	nic_info(nic_hwdev->dev_hdl, "%s super cqe succeeded.\n",
+ (enable ? "Enable" : "Disable"));
+
+ return 0;
+}
+
+int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_capture_info cap_info = {0};
+ u16 out_size = sizeof(cap_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cap_info.op_type = 2; /* function capture */
+ cap_info.is_en_trx = cap_en;
+ cap_info.func_id = func_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_UCAPTURE_OPT,
+ &cap_info, sizeof(cap_info),
+ &cap_info, &out_size);
+ if (err || !out_size || cap_info.status) {
+ nic_err(dev->dev_hdl,
+			"Failed to set function capture attr, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cap_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_capture_en);
+
+int hinic_force_drop_tx_pkt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_force_pkt_drop pkt_drop = {0};
+ u16 out_size = sizeof(pkt_drop);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ pkt_drop.port = hinic_physical_port_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_FORCE_PKT_DROP,
+ &pkt_drop, sizeof(pkt_drop),
+ &pkt_drop, &out_size);
+ if ((pkt_drop.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ pkt_drop.status) || err || !out_size) {
+ nic_err(dev->dev_hdl,
+			"Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pkt_drop.status, out_size);
+ return -EFAULT;
+ }
+
+ return pkt_drop.status;
+}
+
+u32 hw_speed_convert[LINK_SPEED_LEVELS] = {
+ 10, 100, 1000, 10000,
+ 25000, 40000, 100000
+};
+
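+/* Convert the PF bandwidth limit, a percentage of the port speed, into an
+ * absolute rate and program it, e.g. a 30% limit on a 25GE port becomes
+ * 25000 / 100 * 30 = 7500. A limit of 100 means unlimited (rate 0), and a
+ * tiny non-zero limit is rounded up to a rate of 1.
+ */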
+static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level)
+{
+ struct hinic_nic_cfg *nic_cfg = &hwdev->nic_io->nic_cfg;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u32 pf_bw = 0;
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ if (speed_level >= LINK_SPEED_LEVELS) {
+ nic_err(hwdev->dev_hdl, "Invalid speed level: %d\n",
+ speed_level);
+ return -EINVAL;
+ }
+
+ if (nic_cfg->pf_bw_limit == 100) {
+		pf_bw = 0; /* unlimited bandwidth */
+ } else {
+ pf_bw = (hw_speed_convert[speed_level] / 100) *
+ nic_cfg->pf_bw_limit;
+		/* the bandwidth limit is very small but not unlimited in this case */
+ if (pf_bw == 0)
+ pf_bw = 1;
+ }
+
+ rate_cfg.func_id = hinic_global_func_id(hwdev);
+ rate_cfg.tx_rate = pf_bw;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg,
+ sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if (err || !out_size || rate_cfg.status) {
+		nic_err(hwdev->dev_hdl, "Failed to set rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ pf_bw, err, rate_cfg.status, out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_update_pf_bw(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct nic_port_info port_info = {0};
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !(FUNC_SUPPORT_RATE_LIMIT(hwdev)))
+ return 0;
+
+ err = hinic_get_port_info(hwdev, &port_info);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get port info\n");
+ return -EIO;
+ }
+
+ err = __set_pf_bw(hwdev, port_info.speed);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set pf bandwidth\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg;
+ u32 old_bw_limit;
+ u8 link_state = 0;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ if (bw_limit > 100) {
+ nic_err(dev->dev_hdl, "Invalid bandwidth: %d\n", bw_limit);
+ return -EINVAL;
+ }
+
+ err = hinic_get_link_state(hwdev, &link_state);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get link state\n");
+ return -EIO;
+ }
+
+ if (!link_state) {
+		nic_err(dev->dev_hdl, "Link status must be up when setting pf tx rate\n");
+ return -EINVAL;
+ }
+
+ nic_cfg = &dev->nic_io->nic_cfg;
+ old_bw_limit = nic_cfg->pf_bw_limit;
+ nic_cfg->pf_bw_limit = bw_limit;
+
+ err = hinic_update_pf_bw(hwdev);
+ if (err) {
+ nic_cfg->pf_bw_limit = old_bw_limit;
+ return err;
+ }
+
+ return 0;
+}
+
+/* Set link status to follow the port status */
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_link_follow follow = {0};
+ u16 out_size = sizeof(follow);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {
+ nic_err(dev->dev_hdl,
+ "Invalid link follow status: %d\n", status);
+ return -EINVAL;
+ }
+
+ follow.func_id = hinic_global_func_id(hwdev);
+ follow.follow_status = status;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,
+ &follow, sizeof(follow), &follow,
+ &out_size);
+ if ((follow.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ follow.status) || err || !out_size) {
+ nic_err(dev->dev_hdl,
+			"Failed to set link status follow port status, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, follow.status, out_size);
+ return -EFAULT;
+ }
+
+ return follow.status;
+}
+EXPORT_SYMBOL(hinic_set_link_status_follow);
+
+/* HILINK module */
+
+#define HINIC_MGMT_DEFAULT_SIZE 1
+
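+/* Send a HILINK management command. A reply of only HINIC_MGMT_DEFAULT_SIZE
+ * bytes is treated as an unsupported command: the status byte is rewritten
+ * to HINIC_MGMT_CMD_UNSUPPORTED so callers can detect it.
+ */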
+static int __hilink_msg_to_mgmt_sync(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size,
+ u32 timeout)
+{
+ int err;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_HILINK, cmd, buf_in,
+ in_size, buf_out, out_size, timeout);
+ if (err)
+ return err;
+
+ if (*out_size == HINIC_MGMT_DEFAULT_SIZE && buf_out)
+ *((u8 *)(buf_out)) = HINIC_MGMT_CMD_UNSUPPORTED;
+
+ return 0;
+}
+
+int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info)
+{
+ struct hinic_hilink_link_info link_info = {0};
+ u16 out_size = sizeof(link_info);
+ int err;
+
+ link_info.port_id = hinic_physical_port_id(hwdev);
+
+ err = __hilink_msg_to_mgmt_sync(hwdev, HINIC_HILINK_CMD_GET_LINK_INFO,
+ &link_info, sizeof(link_info),
+ &link_info, &out_size, 0);
+ if ((link_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ link_info.status) || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get hilink info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_info.status, out_size);
+ return -EFAULT;
+ }
+
+ if (!link_info.status)
+ memcpy(info, &link_info.info, sizeof(*info));
+ else if (link_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupported command: mod: %d, cmd: %d\n",
+ HINIC_MOD_HILINK, HINIC_HILINK_CMD_GET_LINK_INFO);
+
+ return link_info.status;
+}
+
+int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings)
+{
+ struct hinic_link_ksettings_info info = {0};
+ u16 out_size = sizeof(info);
+ int err;
+
+ info.func_id = hinic_global_func_id(hwdev);
+ info.valid_bitmap = settings->valid_bitmap;
+ info.autoneg = settings->autoneg;
+ info.speed = settings->speed;
+ info.fec = settings->fec;
+
+ err = __hilink_msg_to_mgmt_sync(hwdev,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ &info, sizeof(info),
+ &info, &out_size, 0);
+ if ((info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info.status) || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info.status, out_size);
+ return -EFAULT;
+ }
+
+ return info.status;
+}
+
+int hinic_disable_tx_promisc(void *hwdev)
+{
+ struct hinic_promsic_info info = {0};
+ u16 out_size = sizeof(info);
+ int err;
+
+ info.cfg = HINIC_TX_PROMISC_DISABLE;
+ info.func_id = hinic_global_func_id(hwdev);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_DISABLE_PROMISIC, &info,
+ sizeof(info), &info, &out_size, 0);
+ if (err || !out_size || info.status) {
+ if (info.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+				 "Disabling TX promisc is not supported\n");
+ return 0;
+ }
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to disable multihost promisc, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info.status, out_size);
+ return -EFAULT;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
new file mode 100644
index 000000000000..3c68149e57b8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CFG_H
+#define HINIC_CFG_H
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+
+#define HINIC_RSS_INDIR_SIZE 256
+#define HINIC_DCB_TC_MAX 0x8
+#define HINIC_DCB_UP_MAX 0x8
+#define HINIC_DCB_COS_MAX 0x8
+#define HINIC_DCB_PG_MAX 0x8
+
+#define HINIC_DCB_TSA_TC_SP 2
+#define HINIC_DCB_TSA_TC_DWRR 0
+
+#define HINIC_RSS_KEY_SIZE 40
+
+#define HINIC_MAX_NUM_RQ 64
+
+#define HINIC_MIN_MTU_SIZE 256
+#define HINIC_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC_LRO_MAX_WQE_NUM_UPPER 32
+#define HINIC_LRO_MAX_WQE_NUM_LOWER 1
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM 4
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 8
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+#define HINIC_LRO_WQE_NUM_PANGEA_DEFAULT 32
+
+#define HINIC_LRO_RX_TIMER_UPPER 1024
+#define HINIC_LRO_RX_TIMER_LOWER 1
+#define HINIC_LRO_RX_TIMER_DEFAULT 16
+#define HINIC_LRO_RX_TIMER_DEFAULT_25GE 16
+#define HINIC_LRO_RX_TIMER_DEFAULT_100GE 64
+#define HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE 10
+#define HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE 8
+
+#define HINIC_LOWEST_LATENCY 1
+#define HINIC_RX_RATE_LOW 400000
+#define HINIC_RX_COAL_TIME_LOW 20
+#define HINIC_RX_PENDING_LIMIT_LOW 2
+#define HINIC_RX_RATE_HIGH 1000000
+#define HINIC_RX_COAL_TIME_HIGH 225
+#define HINIC_RX_PENDING_LIMIT_HIGH 50
+#define HINIC_RX_RATE_THRESH 35000
+#define HINIC_RX_RATE_LOW_VM 400000
+#define HINIC_RX_PENDING_LIMIT_HIGH_VM 50
+
+enum hinic_board_type {
+ HINIC_BOARD_UNKNOWN = 0,
+ HINIC_BOARD_10GE = 1,
+ HINIC_BOARD_25GE = 2,
+ HINIC_BOARD_40GE = 3,
+ HINIC_BOARD_100GE = 4,
+ HINIC_BOARD_PG_TP_10GE = 5,
+ HINIC_BOARD_PG_SM_25GE = 6,
+ HINIC_BOARD_PG_100GE = 7,
+};
+
+enum hinic_os_type {
+ HINIC_OS_UNKNOWN = 0,
+ HINIC_OS_HUAWEI = 1,
+ HINIC_OS_NON_HUAWEI = 2,
+};
+
+enum hinic_cpu_type {
+ HINIC_CPU_UNKNOWN = 0,
+ HINIC_CPU_X86_GENERIC = 1,
+ HINIC_CPU_ARM_GENERIC = 2,
+};
+
+struct hinic_adaptive_rx_cfg {
+ u32 lowest_lat;
+ u32 rate_low;
+ u32 coal_time_low;
+ u32 pending_limit_low;
+ u32 rate_high;
+ u32 coal_time_high;
+ u32 pending_limit_high;
+ u32 rate_thresh;
+};
+
+struct hinic_lro_cfg {
+ u32 enable;
+ u32 timer;
+ u32 buffer_size;
+};
+
+struct hinic_environment_info {
+ enum hinic_board_type board;
+ enum hinic_os_type os;
+ enum hinic_cpu_type cpu;
+};
+
+struct hinic_adaptive_cfg {
+ struct hinic_adaptive_rx_cfg adaptive_rx;
+ struct hinic_lro_cfg lro;
+};
+
+enum hinic_rss_hash_type {
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+
+ HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
+struct ifla_vf_info;
+struct hinic_dcb_state;
+
+struct nic_port_info {
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+enum nic_media_type {
+ MEDIA_UNKNOWN = -1,
+ MEDIA_FIBRE = 0,
+ MEDIA_COPPER,
+ MEDIA_BACKPLANE
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_link_status {
+ HINIC_LINK_DOWN = 0,
+ HINIC_LINK_UP
+};
+
+struct nic_pause_config {
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct nic_lro_info {
+ u16 func_id;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 lro_timer_en;
+ u32 lro_period;
+};
+
+struct nic_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic_vport_stats {
+ u64 tx_unicast_pkts_vport;
+ u64 tx_unicast_bytes_vport;
+ u64 tx_multicast_pkts_vport;
+ u64 tx_multicast_bytes_vport;
+ u64 tx_broadcast_pkts_vport;
+ u64 tx_broadcast_bytes_vport;
+
+ u64 rx_unicast_pkts_vport;
+ u64 rx_unicast_bytes_vport;
+ u64 rx_multicast_pkts_vport;
+ u64 rx_multicast_bytes_vport;
+ u64 rx_broadcast_pkts_vport;
+ u64 rx_broadcast_bytes_vport;
+
+ u64 tx_discard_vport;
+ u64 rx_discard_vport;
+ u64 tx_err_vport;
+ u64 rx_err_vport;
+};
+
+struct hinic_phy_port_stats {
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_pkt_num;
+ u64 mac_rx_1519_max_good_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+
+ u64 mac_rx_pause_num;
+ u64 mac_rx_pfc_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_rx_control_pkt_num;
+ u64 mac_rx_y1731_pkt_num;
+ u64 mac_rx_sym_err_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_pkt_num;
+ u64 mac_tx_1519_max_good_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+
+ u64 mac_tx_pause_num;
+ u64 mac_tx_pfc_pkt_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_tx_control_pkt_num;
+ u64 mac_tx_y1731_pkt_num;
+ u64 mac_tx_1588_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+
+ u64 mac_rx_higig2_ext_pkt_num;
+ u64 mac_rx_higig2_message_pkt_num;
+ u64 mac_rx_higig2_error_pkt_num;
+ u64 mac_rx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_rx_higig2_unicast_pkt_num;
+ u64 mac_rx_higig2_broadcast_pkt_num;
+ u64 mac_rx_higig2_l2_multicast_pkt_num;
+ u64 mac_rx_higig2_l3_multicast_pkt_num;
+
+ u64 mac_tx_higig2_message_pkt_num;
+ u64 mac_tx_higig2_ext_pkt_num;
+ u64 mac_tx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_tx_higig2_unicast_pkt_num;
+ u64 mac_tx_higig2_broadcast_pkt_num;
+ u64 mac_tx_higig2_l2_multicast_pkt_num;
+ u64 mac_tx_higig2_l3_multicast_pkt_num;
+};
+
+#define HINIC_MGMT_VERSION_MAX_LEN 32
+
+#define HINIC_FW_VERSION_NAME 16
+#define HINIC_FW_VERSION_SECTION_CNT 4
+#define HINIC_FW_VERSION_SECTION_BORDER 0xFF
+struct hinic_fw_version {
+ u8 mgmt_ver[HINIC_FW_VERSION_NAME];
+ u8 microcode_ver[HINIC_FW_VERSION_NAME];
+ u8 boot_ver[HINIC_FW_VERSION_NAME];
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
+struct hinic_link_ksettings {
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+};
+
+enum hinic_link_follow_status {
+ HINIC_LINK_FOLLOW_DEFAULT,
+ HINIC_LINK_FOLLOW_PORT,
+ HINIC_LINK_FOLLOW_SEPARATE,
+ HINIC_LINK_FOLLOW_STATUS_MAX,
+};
+
+enum hinic_lro_en_status {
+ HINIC_LRO_STATUS_DISABLE,
+ HINIC_LRO_STATUS_ENABLE,
+ HINIC_LRO_STATUS_UNSET,
+};
+
+#define HINIC_VLAN_FILTER_EN (1U << 0)
+#define HINIC_BROADCAST_FILTER_EX_EN (1U << 1)
+
+/* Set mac_vlan table */
+int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac,
+ u16 vlan_id, u16 func_id);
+/* Obtaining the permanent mac */
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr);
+/* Check whether the current solution uses this interface;
+ * the current code does not invoke the SDK interface to set the MTU.
+ */
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu);
+/* Set vlan leaf table */
+int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id);
+
+int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl);
+
+int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id);
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_set_autoneg(void *hwdev, bool enable);
+
+int hinic_force_port_relink(void *hwdev);
+
+int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported,
+ enum hinic_link_mode *advertised);
+
+int hinic_set_port_link_status(void *hwdev, bool enable);
+
+int hinic_set_speed(void *hwdev, enum nic_speed_level speed);
+/* SPEED_UNKNOWN = -1, SPEED_10MB_LINK = 0 */
+int hinic_get_speed(void *hwdev, enum nic_speed_level *speed);
+
+int hinic_get_link_state(void *hwdev, u8 *link_state);
+
+int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause);
+
+int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause);
+
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause);
+
+int hinic_set_rx_mode(void *hwdev, u32 enable);
+
+/* offload feature */
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en);
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en);
+
+int hinic_set_tx_tso(void *hwdev, u8 tso_en);
+
+/* Linux NIC used */
+int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num);
+
+/* Win NIC used */
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num);
+
+/* Commands related to dcbtool */
+int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap);
+
+int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap);
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid,
+ u8 *up_bw, u8 *prio);
+
+int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid,
+ u8 *up_bw, u8 *prio);
+
+int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up);
+
+int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map);
+
+int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold);
+
+int hinic_set_bp_thd(void *hwdev, u16 threshold);
+
+int hinic_disable_fw_bp(void *hwdev);
+
+int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx);
+
+int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd,
+ u16 prod_idx);
+
+/* nictool adaptation interface */
+int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period);
+/* There should be an output parameter; add the
+ * output parameter struct nic_up_offload *cfg
+ */
+int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *lro_info);
+
+int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size);
+
+int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size);
+
+int hinic_set_loopback_mode(void *hwdev, bool enable);
+int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable);
+int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable);
+
+int hinic_get_port_enable_state(void *hwdev, bool *enable);
+
+int hinic_get_vport_enable_state(void *hwdev, bool *enable);
+
+int hinic_set_lli_state(void *hwdev, u8 lli_state);
+
+int hinic_set_vport_enable(void *hwdev, bool enable);
+
+int hinic_set_port_enable(void *hwdev, bool enable);
+
+/* rss */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type);
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx,
+ struct nic_rss_type *rss_type);
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp);
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type);
+
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type);
+
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table);
+
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc);
+
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx);
+
+/* disable or enable traffic of all functions in the same port */
+int hinic_set_port_funcs_state(void *hwdev, bool enable);
+
+int hinic_reset_port_link_cfg(void *hwdev);
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats);
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats);
+
+int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver);
+
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver);
+
+int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac);
+
+int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos);
+
+int hinic_kill_vf_vlan(void *hwdev, int vf_id);
+
+int hinic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr);
+
+u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id);
+
+bool hinic_vf_is_registered(void *hwdev, u16 vf_id);
+
+void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi);
+
+void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link);
+
+void hinic_save_pf_link_status(void *hwdev, u8 link);
+
+int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link);
+
+int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate);
+
+int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id);
+
+int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id);
+
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id,
+ u16 *func_id);
+int hinic_set_anti_attack(void *hwdev, bool enable);
+
+int hinic_flush_sq_res(void *hwdev);
+
+int hinic_set_super_cqe_state(void *hwdev, bool enable);
+
+int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en);
+
+int hinic_force_drop_tx_pkt(void *hwdev);
+
+int hinic_update_pf_bw(void *hwdev);
+
+int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit);
+
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status);
+int hinic_disable_tx_promisc(void *hwdev);
+
+/* HILINK module */
+int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
new file mode 100644
index 000000000000..90189f963740
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_wq.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic.h"
+#include "hinic_dbg.h"
+
+u16 hinic_dbg_get_qp_num(void *hwdev)
+{
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev)
+ return 0;
+
+ nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ if (!nic_io)
+ return 0;
+
+ return nic_io->num_qps;
+}
+
+void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev)
+ return NULL;
+
+ nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ if (!nic_io)
+ return NULL;
+
+ if (q_id >= nic_io->num_qps)
+ return NULL;
+
+ return &nic_io->qps[q_id];
+}
+
+void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (!qp)
+ return NULL;
+
+ return qp->sq.wq;
+}
+
+void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (!qp)
+ return NULL;
+
+ return qp->rq.wq;
+}
+
+u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id)
+{
+ struct hinic_wq *wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id);
+
+ if (!wq)
+ return 0;
+
+ return ((u16)wq->prod_idx) & wq->mask;
+}
+
+u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (qp)
+ return be16_to_cpu(*qp->rq.pi_virt_addr);
+
+ /* use pr_err here: hwdev may be NULL when the qp lookup fails */
+ pr_err("Get rq hw pi failed!\n");
+
+ return 65535;
+}
+
+u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id)
+{
+ struct hinic_wq *wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id);
+
+ if (!wq)
+ return 0;
+
+ return ((u16)wq->prod_idx) & wq->mask;
+}
+
+void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (!qp)
+ return NULL;
+
+ return qp->sq.cons_idx_addr;
+}
+
+u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (!qp)
+ return 0;
+
+ return qp->sq.wq->block_paddr;
+}
+
+u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id)
+{
+ struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+ if (!qp)
+ return 0;
+
+ return qp->rq.wq->block_paddr;
+}
+
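+/* Resolve the mapped doorbell address of an SQ together with the doorbell
+ * page index inside the BAR and the corresponding physical address.
+ */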
+int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr,
+ u64 *phy_addr, u32 *pg_idx)
+{
+ struct hinic_qp *qp;
+ struct hinic_hwif *hwif;
+
+ qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+ if (!qp)
+ return -EFAULT;
+
+ hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ *map_addr = (u64 *)qp->sq.db_addr;
+ *pg_idx = DB_IDX(qp->sq.db_addr, hwif->db_base);
+ *phy_addr = hwif->db_base_phy + (*pg_idx) * HINIC_DB_PAGE_SIZE;
+
+ return 0;
+}
+
+u16 hinic_dbg_get_global_qpn(void *hwdev)
+{
+ if (!hwdev)
+ return 0;
+
+ return ((struct hinic_hwdev *)hwdev)->nic_io->global_qpn;
+}
+
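+/* Copy wqebb_cnt WQEBBs starting at index idx from the work queue into the
+ * caller buffer. *wqe_size must equal wqebb_cnt * wqebb_size exactly, and
+ * the requested range must not run past the queue depth.
+ */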
+static int get_wqe_info(struct hinic_wq *wq, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size)
+{
+ void *src_wqe;
+ u32 offset;
+ u16 i;
+
+ if (idx + wqebb_cnt > wq->q_depth)
+ return -EFAULT;
+
+ if (*wqe_size != (u16)(wq->wqebb_size * wqebb_cnt)) {
+ pr_err("Unexpect out buf size from user :%d, expect: %d\n",
+ *wqe_size, (u16)(wq->wqebb_size * wqebb_cnt));
+ return -EFAULT;
+ }
+
+ for (i = 0; i < wqebb_cnt; i++) {
+ src_wqe = (void *)hinic_slq_get_addr(wq, idx + i);
+ offset = i * wq->wqebb_size;
+ memcpy(wqe + offset, src_wqe, wq->wqebb_size);
+ }
+
+ return 0;
+}
+
+int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size)
+{
+ struct hinic_wq *wq;
+ int err;
+
+ wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id);
+ if (!wq)
+ return -EFAULT;
+
+ err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size);
+
+ return err;
+}
+
+int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size)
+{
+ struct hinic_wq *wq;
+ int err;
+
+ wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id);
+ if (!wq)
+ return -EFAULT;
+
+ err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size);
+
+ return err;
+}
+
+int hinic_dbg_get_hw_stats(void *hwdev, u8 *hw_stats, u16 *out_size)
+{
+ if (*out_size != sizeof(struct hinic_hw_stats)) {
+ pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
+ *out_size, sizeof(struct hinic_hw_stats));
+ return -EFAULT;
+ }
+
+ memcpy(hw_stats, &((struct hinic_hwdev *)hwdev)->hw_stats,
+ sizeof(struct hinic_hw_stats));
+ return 0;
+}
+
+u16 hinic_dbg_clear_hw_stats(void *hwdev)
+{
+ memset((void *)&((struct hinic_hwdev *)hwdev)->hw_stats, 0,
+ sizeof(struct hinic_hw_stats));
+ memset((void *)((struct hinic_hwdev *)hwdev)->chip_fault_stats, 0,
+ HINIC_CHIP_FAULT_SIZE);
+ return sizeof(struct hinic_hw_stats);
+}
+
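+/* Copy one MAX_DRV_BUF_SIZE chunk of the chip fault statistics starting at
+ * offset; the final chunk is shortened so the copy never reads beyond
+ * HINIC_CHIP_FAULT_SIZE.
+ */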
+void hinic_get_chip_fault_stats(void *hwdev, u8 *chip_fault_stats, int offset)
+{
+ int copy_len = offset + MAX_DRV_BUF_SIZE - HINIC_CHIP_FAULT_SIZE;
+
+ if (offset + MAX_DRV_BUF_SIZE <= HINIC_CHIP_FAULT_SIZE)
+ memcpy(chip_fault_stats,
+ ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset,
+ MAX_DRV_BUF_SIZE);
+ else
+ memcpy(chip_fault_stats,
+ ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset,
+ copy_len);
+}
+
+int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!dev->nic_io)
+ return -EINVAL;
+
+ nic_cfg = &dev->nic_io->nic_cfg;
+
+ *pf_bw_limit = nic_cfg->pf_bw_limit;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
new file mode 100644
index 000000000000..30398ee0cc4b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
@@ -0,0 +1,993 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_wq.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic.h"
+#include "hinic_ctx_def.h"
+
+#define HINIC_DEFAULT_TX_CI_PENDING_LIMIT 0
+#define HINIC_DEFAULT_TX_CI_COALESCING_TIME 0
+
+static unsigned char tx_pending_limit = HINIC_DEFAULT_TX_CI_PENDING_LIMIT;
+module_param(tx_pending_limit, byte, 0444);
+MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)");
+
+static unsigned char tx_coalescing_time = HINIC_DEFAULT_TX_CI_COALESCING_TIME;
+module_param(tx_coalescing_time, byte, 0444);
+MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)");
+
+#define WQ_PREFETCH_MAX 4
+#define WQ_PREFETCH_MIN 1
+#define WQ_PREFETCH_THRESHOLD 256
+
+struct hinic_qp_ctxt_header {
+ u16 num_queues;
+ u16 queue_type;
+ u32 addr_offset;
+};
+
+struct hinic_sq_ctxt {
+ u32 ceq_attr;
+
+ u32 ci_owner;
+
+ u32 wq_pfn_hi;
+ u32 wq_pfn_lo;
+
+ u32 pref_cache;
+ u32 pref_owner;
+ u32 pref_wq_pfn_hi_ci;
+ u32 pref_wq_pfn_lo;
+
+ u32 rsvd8;
+ u32 rsvd9;
+
+ u32 wq_block_pfn_hi;
+ u32 wq_block_pfn_lo;
+};
+
+struct hinic_rq_ctxt {
+ u32 ceq_attr;
+
+ u32 pi_intr_attr;
+
+ u32 wq_pfn_hi_ci;
+ u32 wq_pfn_lo;
+
+ u32 pref_cache;
+ u32 pref_owner;
+
+ u32 pref_wq_pfn_hi_ci;
+ u32 pref_wq_pfn_lo;
+
+ u32 pi_paddr_hi;
+ u32 pi_paddr_lo;
+
+ u32 wq_block_pfn_hi;
+ u32 wq_block_pfn_lo;
+};
+
+struct hinic_sq_ctxt_block {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+struct hinic_rq_ctxt_block {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+struct hinic_sq_db {
+ u32 db_info;
+};
+
+struct hinic_clean_queue_ctxt {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ u32 ctxt_size;
+};
+
+static int init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id,
+ u16 sq_msix_idx, void *cons_idx_addr, void __iomem *db_addr)
+{
+ sq->wq = wq;
+ sq->q_id = q_id;
+ sq->owner = 1;
+ sq->msix_entry_idx = sq_msix_idx;
+
+ sq->cons_idx_addr = cons_idx_addr;
+ sq->db_addr = db_addr;
+
+ return 0;
+}
+
+static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq,
+ u16 q_id, u16 rq_msix_idx)
+{
+ rq->wq = wq;
+ rq->q_id = q_id;
+
+ rq->msix_entry_idx = rq_msix_idx;
+
+ rq->pi_virt_addr = dma_zalloc_coherent(dev_hdl, PAGE_SIZE,
+ &rq->pi_dma_addr, GFP_KERNEL);
+ if (!rq->pi_virt_addr) {
+ nic_err(dev_hdl, "Failed to allocate rq pi virtual addr\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void clean_rq(struct hinic_rq *rq, void *dev_hdl)
+{
+ dma_free_coherent(dev_hdl, PAGE_SIZE, rq->pi_virt_addr,
+ rq->pi_dma_addr);
+}
+
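+/* Allocate the SQ/RQ work queues and the SQ doorbell for one queue pair,
+ * then initialize both queues; on failure the resources already acquired
+ * are released in reverse order through the error labels below.
+ */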
+static int create_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp,
+ u16 q_id, u16 qp_msix_idx, int max_sq_sge)
+{
+ struct hinic_sq *sq = &qp->sq;
+ struct hinic_rq *rq = &qp->rq;
+ void __iomem *db_addr;
+ int err;
+
+ err = hinic_wq_allocate(&nic_io->wqs, &nic_io->sq_wq[q_id],
+ HINIC_SQ_WQEBB_SIZE,
+ nic_io->hwdev->wq_page_size, nic_io->sq_depth,
+ MAX_WQE_SIZE(max_sq_sge, HINIC_SQ_WQEBB_SIZE));
+ if (err) {
+ nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for SQ\n");
+ return err;
+ }
+
+ err = hinic_wq_allocate(&nic_io->wqs, &nic_io->rq_wq[q_id],
+ HINIC_RQ_WQE_SIZE, nic_io->hwdev->wq_page_size,
+ nic_io->rq_depth, HINIC_RQ_WQE_SIZE);
+ if (err) {
+ nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for RQ\n");
+ goto rq_alloc_err;
+ }
+
+ /* we don't use direct wqe for sq */
+ err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL);
+ if (err) {
+ nic_err(nic_io->hwdev->dev_hdl, "Failed to alloc sq doorbell addr\n");
+ goto alloc_db_err;
+ }
+
+ err = init_sq(sq, &nic_io->sq_wq[q_id], q_id, qp_msix_idx,
+ HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id), db_addr);
+ if (err) {
+ nic_err(nic_io->hwdev->dev_hdl, "Failed to init sq\n");
+ goto sq_init_err;
+ }
+
+ err = init_rq(rq, nic_io->hwdev->dev_hdl, &nic_io->rq_wq[q_id],
+ q_id, qp_msix_idx);
+ if (err) {
+ nic_err(nic_io->hwdev->dev_hdl, "Failed to init rq\n");
+ goto rq_init_err;
+ }
+
+ return 0;
+
+rq_init_err:
+sq_init_err:
+ hinic_free_db_addr(nic_io->hwdev, db_addr, NULL);
+
+alloc_db_err:
+ hinic_wq_free(&nic_io->wqs, &nic_io->rq_wq[q_id]);
+
+rq_alloc_err:
+ hinic_wq_free(&nic_io->wqs, &nic_io->sq_wq[q_id]);
+
+ return err;
+}
+
+static void destroy_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp)
+{
+ clean_rq(&qp->rq, nic_io->hwdev->dev_hdl);
+
+ hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL);
+
+ hinic_wq_free(&nic_io->wqs, qp->sq.wq);
+ hinic_wq_free(&nic_io->wqs, qp->rq.wq);
+}
+
+/* alloc qps and init qps ctxt */
+int hinic_create_qps(void *dev, u16 num_qp, u16 sq_depth, u16 rq_depth,
+ struct irq_info *qps_msix_arry, int max_sq_sge)
+{
+ struct hinic_hwdev *hwdev = dev;
+ struct hinic_nic_io *nic_io;
+ u16 q_id, i, max_qps;
+ int err;
+
+ if (!hwdev || !qps_msix_arry)
+ return -EFAULT;
+
+ max_qps = hinic_func_max_qnum(hwdev);
+ if (num_qp > max_qps) {
+ nic_err(hwdev->dev_hdl, "Create number of qps: %d > max number of
qps:%d\n",
+ num_qp, max_qps);
+ return -EINVAL;
+ }
+
+ nic_io = hwdev->nic_io;
+
+ nic_io->max_qps = max_qps;
+ nic_io->num_qps = num_qp;
+ nic_io->sq_depth = sq_depth;
+ nic_io->rq_depth = rq_depth;
+
+ err = hinic_wqs_alloc(&nic_io->wqs, 2 * num_qp, hwdev->dev_hdl);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to allocate WQS for IO\n");
+ return err;
+ }
+
+ nic_io->qps = kcalloc(num_qp, sizeof(*nic_io->qps), GFP_KERNEL);
+ if (!nic_io->qps) {
+ err = -ENOMEM;
+ goto alloc_qps_err;
+ }
+
+ nic_io->ci_vaddr_base =
+ dma_zalloc_coherent(hwdev->dev_hdl,
+ CI_TABLE_SIZE(num_qp, PAGE_SIZE),
+ &nic_io->ci_dma_base, GFP_KERNEL);
+ if (!nic_io->ci_vaddr_base) {
+ err = -ENOMEM;
+ goto ci_base_err;
+ }
+
+ nic_io->sq_wq = kcalloc(num_qp, sizeof(*nic_io->sq_wq), GFP_KERNEL);
+ if (!nic_io->sq_wq) {
+ err = -ENOMEM;
+ goto sq_wq_err;
+ }
+
+ nic_io->rq_wq = kcalloc(num_qp, sizeof(*nic_io->rq_wq), GFP_KERNEL);
+ if (!nic_io->rq_wq) {
+ err = -ENOMEM;
+ goto rq_wq_err;
+ }
+
+ for (q_id = 0; q_id < num_qp; q_id++) {
+ err = create_qp(nic_io, &nic_io->qps[q_id], q_id,
+ qps_msix_arry[q_id].msix_entry_idx, max_sq_sge);
+ if (err) {
+ nic_err(hwdev->dev_hdl,
+ "Failed to allocate qp %d, err: %d\n",
+ q_id, err);
+ goto create_qp_err;
+ }
+ }
+
+ return 0;
+
+create_qp_err:
+ for (i = 0; i < q_id; i++)
+ destroy_qp(nic_io, &nic_io->qps[i]);
+
+ kfree(nic_io->rq_wq);
+
+rq_wq_err:
+ kfree(nic_io->sq_wq);
+
+sq_wq_err:
+ dma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ci_base_err:
+ kfree(nic_io->qps);
+
+alloc_qps_err:
+ hinic_wqs_free(&nic_io->wqs);
+
+ return err;
+}
+EXPORT_SYMBOL(hinic_create_qps);
+
+void hinic_free_qps(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+ struct hinic_nic_io *nic_io;
+ u16 i;
+
+ if (!hwdev)
+ return;
+
+ nic_io = hwdev->nic_io;
+
+ for (i = 0; i < nic_io->num_qps; i++)
+ destroy_qp(nic_io, &nic_io->qps[i]);
+
+ kfree(nic_io->rq_wq);
+ kfree(nic_io->sq_wq);
+
+ dma_free_coherent(hwdev->dev_hdl,
+ CI_TABLE_SIZE(nic_io->num_qps, PAGE_SIZE),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ kfree(nic_io->qps);
+
+ hinic_wqs_free(&nic_io->wqs);
+}
+EXPORT_SYMBOL(hinic_free_qps);
+
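+/* Fill the cmdq header that precedes a batch of queue contexts: the context
+ * address offset is expressed in 16-byte units and the whole header is
+ * converted to big-endian before it is handed to the microcode.
+ */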
+void hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+ enum hinic_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 max_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = ctxt_type;
+ qp_ctxt_hdr->num_queues = num_queues;
+
+ if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
+ qp_ctxt_hdr->addr_offset =
+ SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+ else
+ qp_ctxt_hdr->addr_offset =
+ RQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+
+ qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
+
+ hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
+}
+
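+/* Build the hardware SQ context: WQ page/block PFNs, the initial CI/PI and
+ * owner bit, and the prefetch attributes. A single-page WQ uses the page
+ * address itself as a 0-level CLA; otherwise the CLA block address is used.
+ */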
+void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
+ struct hinic_sq_ctxt *sq_ctxt)
+{
+ struct hinic_wq *wq = sq->wq;
+ u64 wq_page_addr;
+ u64 wq_page_pfn, wq_block_pfn;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (u16)wq->cons_idx;
+ pi_start = (u16)wq->prod_idx;
+
+ /* read the first page from the HW table */
+ wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ /* If only one page, use 0-level CLA */
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr);
+
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
+ SQ_CTXT_CEQ_ATTR_SET(0, EN);
+
+ sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
+ SQ_CTXT_CI_SET(1, OWNER);
+
+ sq_ctxt->wq_pfn_hi =
+ SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
+
+ sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ sq_ctxt->pref_cache =
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ sq_ctxt->pref_owner = 1;
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ SQ_CTXT_PREF_SET(ci_start, CI) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);
+
+ sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+ sq_ctxt->wq_block_pfn_hi =
+ SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
+}
+
+void hinic_rq_prepare_ctxt(struct hinic_rq *rq, struct hinic_rq_ctxt *rq_ctxt)
+{
+ struct hinic_wq *wq = rq->wq;
+ u64 wq_page_addr;
+ u64 wq_page_pfn, wq_block_pfn;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (u16)wq->cons_idx;
+ pi_start = (u16)wq->prod_idx;
+ pi_start = pi_start & wq->mask;
+
+ /* read the first page from the HW table */
+ wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr);
+
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
+
+ rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
+ RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR);
+
+ rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
+
+ rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pref_cache =
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ rq_ctxt->pref_owner = 1;
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI);
+
+ rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
+ rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
+
+ rq_ctxt->wq_block_pfn_hi =
+ RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
+
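+/* Push the SQ contexts to hardware through the command queue, batching at
+ * most HINIC_Q_CTXT_MAX contexts per command buffer.
+ */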
+static int init_sq_ctxts(struct hinic_nic_io *nic_io)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_sq_ctxt_block *sq_ctxt_block;
+ struct hinic_sq_ctxt *sq_ctxt;
+ struct hinic_cmd_buf *cmd_buf;
+ struct hinic_qp *qp;
+ u64 out_param = 0;
+ u16 q_id, curr_id, global_qpn, max_ctxts, i;
+ int err = 0;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ?
+ HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
+ nic_io->max_qps, q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ qp = &nic_io->qps[curr_id];
+ global_qpn = nic_io->global_qpn + curr_id;
+
+ hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
+ }
+
+ cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ nic_err(hwdev->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param:
0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_rq_ctxts(struct hinic_nic_io *nic_io)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_rq_ctxt_block *rq_ctxt_block;
+ struct hinic_rq_ctxt *rq_ctxt;
+ struct hinic_cmd_buf *cmd_buf;
+ struct hinic_qp *qp;
+ u64 out_param = 0;
+ u16 q_id, curr_id, max_ctxts, i;
+ int err = 0;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ?
+ HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
+ nic_io->max_qps, q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ qp = &nic_io->qps[curr_id];
+
+ hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
+ }
+
+ cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+
+ if (err || out_param != 0) {
+ nic_err(hwdev->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param:
0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic_nic_io *nic_io)
+{
+ int err;
+
+ err = init_sq_ctxts(nic_io);
+ if (err)
+ return err;
+
+ err = init_rq_ctxts(nic_io);
+ if (err)
+ return err;
+
+ return 0;
+}
+
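+/* Issue a CLEAN_QUEUE_CONTEXT command that clears the TSO/LRO offload
+ * context of every queue of the given type; ctxt_size 0x3 selects the
+ * largest (240B) context layout.
+ */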
+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
+ enum hinic_qp_ctxt_type ctxt_type)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_clean_queue_ctxt *ctxt_block;
+ struct hinic_cmd_buf *cmd_buf;
+ u64 out_param = 0;
+ int err;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
+ ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+ ctxt_block->cmdq_hdr.addr_offset = 0;
+
+ /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+ ctxt_block->ctxt_size = 0x3;
+
+ hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = sizeof(*ctxt_block);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+
+ if (err || out_param) {
+ nic_err(hwdev->dev_hdl, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
+{
+ /* clean LRO/TSO context space */
+ return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
+}
+
+/* init qp ctxts, set the sq ci attr and arm all sqs */
+int hinic_init_qp_ctxts(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+ struct hinic_nic_io *nic_io;
+ struct hinic_sq_attr sq_attr;
+ u16 q_id;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = hwdev->nic_io;
+
+ err = init_qp_ctxts(nic_io);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to init QP ctxts\n");
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_io);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to clean qp offload ctxts\n");
+ return err;
+ }
+
+ err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
+ nic_io->sq_depth, nic_io->rx_buff_len);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to set root context\n");
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sq_attr.ci_dma_base =
+ HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
+ sq_attr.pending_limit = tx_pending_limit;
+ sq_attr.coalescing_time = tx_coalescing_time;
+ sq_attr.intr_en = 1;
+ sq_attr.intr_idx = nic_io->qps[q_id].sq.msix_entry_idx;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
+ if (err) {
+ nic_err(hwdev->dev_hdl, "Failed to set ci table\n");
+ goto set_cons_idx_table_err;
+ }
+ }
+
+ return 0;
+
+set_cons_idx_table_err:
+ hinic_clean_root_ctxt(hwdev);
+
+ return err;
+}
+
+void hinic_free_qp_ctxts(void *hwdev)
+{
+ int err;
+
+ if (!hwdev)
+ return;
+
+ err = hinic_clean_root_ctxt(hwdev);
+ if (err)
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to clean root ctxt\n");
+}
+EXPORT_SYMBOL(hinic_free_qp_ctxts);
+
+int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_io *nic_io;
+ u16 global_qpn;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = dev->nic_io;
+
+ err = hinic_get_base_qpn(hwdev, &global_qpn);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get base qpn\n");
+ return err;
+ }
+
+ nic_io->global_qpn = global_qpn;
+ nic_io->rx_buff_len = rx_buff_len;
+ err = hinic_init_function_table(hwdev, nic_io->rx_buff_len);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to init function table\n");
+ return err;
+ }
+
+ err = hinic_enable_fast_recycle(hwdev, false);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to disable fast recycle\n");
+ return err;
+ }
+
+ /* get the default pf bandwidth from firmware, which was set by the BIOS */
+ err = hinic_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get pf bandwidth limit\n");
+ return err;
+ }
+
+ if (dev->func_mode == FUNC_MOD_MULTI_BM_MASTER ||
+ dev->func_mode == FUNC_MOD_MULTI_VM_MASTER) {
+ if (hinic_func_type(dev) != TYPE_VF) {
+ err = hinic_disable_tx_promisc(dev);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set tx promisc\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+void hinic_free_nic_hwdev(void *hwdev)
+{
+ /* nothing to do for now */
+}
+EXPORT_SYMBOL(hinic_free_nic_hwdev);
+
+int hinic_enable_tx_irq(void *hwdev, u16 q_id)
+{
+ return hinic_set_arm_bit(hwdev, HINIC_SET_ARM_SQ, q_id);
+}
+EXPORT_SYMBOL(hinic_enable_tx_irq);
+
+int hinic_rx_tx_flush(void *hwdev)
+{
+ return hinic_func_rx_tx_flush(hwdev);
+}
+EXPORT_SYMBOL(hinic_rx_tx_flush);
+
+int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+ return atomic_read(&wq->delta) - 1;
+}
+EXPORT_SYMBOL(hinic_get_sq_free_wqebbs);
+
+int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ return atomic_read(&nic_io->rq_wq[q_id].delta) - 1;
+}
+EXPORT_SYMBOL(hinic_get_rq_free_wqebbs);
+
+u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ return (u16)(nic_io->sq_wq[q_id].cons_idx & nic_io->sq_wq[q_id].mask);
+}
+EXPORT_SYMBOL(hinic_get_sq_local_ci);
+
+u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ return MASKED_SQ_IDX(sq, be16_to_cpu(*(u16 *)(sq->cons_idx_addr)));
+}
+EXPORT_SYMBOL(hinic_get_sq_hw_ci);
+
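+/* Reserve wqebb_cnt WQEBBs from the SQ and report the current owner bit;
+ * the owner bit is toggled whenever the reservation wraps past the end of
+ * the queue.
+ */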
+void *hinic_get_sq_wqe(void *hwdev, u16 q_id, int wqebb_cnt, u16 *pi, u8 *owner)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+ void *wqe;
+
+ wqe = hinic_get_wqe(sq->wq, wqebb_cnt, pi);
+ if (wqe) {
+ *owner = sq->owner;
+ if ((*pi + wqebb_cnt) >= nic_io->sq_depth)
+ sq->owner = !sq->owner;
+ }
+
+ return wqe;
+}
+EXPORT_SYMBOL(hinic_get_sq_wqe);
+
+void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ if (owner != sq->owner)
+ sq->owner = owner;
+
+ atomic_add(num_wqebbs, &sq->wq->delta);
+ sq->wq->prod_idx -= num_wqebbs;
+}
+EXPORT_SYMBOL(hinic_return_sq_wqe);
+
+void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs, u16 *pi,
+ u8 *owner)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ *pi = MASKED_WQE_IDX(sq->wq, sq->wq->prod_idx);
+
+ atomic_sub(num_wqebbs, &sq->wq->delta);
+ sq->wq->prod_idx += num_wqebbs;
+
+ *owner = sq->owner;
+ if ((*pi + num_wqebbs) >= nic_io->sq_depth)
+ sq->owner = !sq->owner;
+}
+EXPORT_SYMBOL(hinic_update_sq_pi);
+
+static void sq_prepare_db(struct hinic_sq *sq, struct hinic_sq_db *db,
+ u16 prod_idx, int cos)
+{
+ u32 hi_prod_idx = SQ_DB_PI_HIGH(MASKED_SQ_IDX(sq, prod_idx));
+
+ db->db_info = SQ_DB_INFO_SET(hi_prod_idx, HI_PI) |
+ SQ_DB_INFO_SET(SQ_DB, TYPE) |
+ SQ_DB_INFO_SET(CFLAG_DATA_PATH, CFLAG) |
+ SQ_DB_INFO_SET(cos, COS) |
+ SQ_DB_INFO_SET(sq->q_id, QID);
+}
+
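+/* Ring the SQ doorbell: the doorbell word is written big-endian, after a
+ * write barrier that orders the WQE writes before the doorbell itself.
+ */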
+static void sq_write_db(struct hinic_sq *sq, u16 prod_idx, int cos)
+{
+ struct hinic_sq_db sq_db;
+
+ sq_prepare_db(sq, &sq_db, prod_idx, cos);
+
+ /* Data should be written to HW in Big Endian Format */
+ sq_db.db_info = cpu_to_be32(sq_db.db_info);
+
+ wmb(); /* Write all before the doorbell */
+
+ writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx));
+}
+
+void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe, int wqebb_cnt, int cos)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ if (wqebb_cnt != 1)
+ hinic_write_wqe(sq->wq, wqe, wqebb_cnt);
+
+ sq_write_db(sq, MASKED_SQ_IDX(sq, sq->wq->prod_idx), cos);
+}
+EXPORT_SYMBOL(hinic_send_sq_wqe);
+
+void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ sq->wq->cons_idx += wqebb_cnt;
+ atomic_add(wqebb_cnt, &sq->wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_sq_local_ci);
+
+void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ return hinic_get_wqe(rq->wq, 1, pi);
+}
+EXPORT_SYMBOL(hinic_get_rq_wqe);
+
+void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ atomic_add(num_wqebbs, &rq->wq->delta);
+ rq->wq->prod_idx -= num_wqebbs;
+}
+EXPORT_SYMBOL(hinic_return_rq_wqe);
+
+void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ atomic_sub(num_wqebbs, &nic_io->qps[q_id].rq.wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_rq_delta);
+
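+/* Publish the RQ producer index to hardware: the masked PI is stored
+ * big-endian at the PI address that was DMA-mapped in init_rq().
+ */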
+void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ *rq->pi_virt_addr = cpu_to_be16(pi & rq->wq->mask);
+}
+EXPORT_SYMBOL(hinic_update_rq_hw_pi);
+
+u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ return (u16)(nic_io->rq_wq[q_id].cons_idx & nic_io->rq_wq[q_id].mask);
+}
+EXPORT_SYMBOL(hinic_get_rq_local_ci);
+
+void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ nic_io->qps[q_id].rq.wq->cons_idx += wqe_cnt;
+ atomic_add(wqe_cnt, &nic_io->qps[q_id].rq.wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_rq_local_ci);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
new file mode 100644
index 000000000000..92b683f90f26
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HW_NIC_IO_H_
+#define HINIC_HW_NIC_IO_H_
+
+#include "hinic_hw_mgmt.h"
+#include "hinic_qe_def.h"
+
+#define HINIC_RX_BUF_SHIFT 11
+#define HINIC_RX_BUF_LEN 2048 /* buffer len must be 2^n */
+
+#define SQ_CTRL_SET(val, member) ((u32)(val) << SQ_CTRL_##member##_SHIFT)
+
+int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len);
+void hinic_free_nic_hwdev(void *hwdev);
+
+/* alloc qps resource */
+int hinic_create_qps(void *hwdev, u16 qp_num, u16 sq_depth, u16 rq_depth,
+ struct irq_info *rq_msix_arry, int max_sq_sge);
+void hinic_free_qps(void *hwdev);
+
+/* init qp ctxts, set the sq ci attr and arm all sqs */
+int hinic_init_qp_ctxts(void *hwdev);
+void hinic_free_qp_ctxts(void *hwdev);
+
+/* function table and root context set */
+int hinic_set_parameters(void *hwdev, u8 *mac, u16 rx_buf_size, u32 mtu);
+void hinic_clear_parameters(void *hwdev);
+
+/* Invoked internally by the driver; wraps the set_arm_bit function */
+int hinic_enable_tx_irq(void *hwdev, u16 q_id);
+
+int hinic_rx_tx_flush(void *hwdev);
+
+/* Obtain the number of idle WQEBBs in the sq/rq */
+int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id);
+int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id);
+
+u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id);
+u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id);
+
+void *hinic_get_sq_wqe(void *hwdev, u16 q_id,
+ int wqebb_cnt, u16 *pi, u8 *owner);
+
+void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner);
+
+void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs,
+ u16 *pi, u8 *owner);
+
+/* includes cross-page processing and rings the doorbell 20170425 */
+void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe,
+ int wqebb_cnt, int cos);
+
+void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt);
+
+/* Refresh the rq buff */
+void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi);
+/* update the rq pi; pi is already the latest value,
+ * the function does not need to calculate it
+ */
+void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi);
+
+u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id);
+
+/* The rx done flag is not cleared */
+void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt);
+
+struct hinic_sge {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+};
+
+void hinic_cpu_to_be32(void *data, int len);
+
+void hinic_be32_to_cpu(void *data, int len);
+
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
+
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
new file mode 100644
index 000000000000..548a242c1c3c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C), 2001-2011, Huawei Tech. Co., Ltd.
+ *
+ * File Name : hinic_port_cmd.h
+ * Version : Initial Draft
+ * Author : Qu Huichun
+ * Created : 2018/5/29
+ * Last Modified :
+ * Description : Commands between NIC and uP
+ * Function List :
+ * History :
+ * 1.Date : 2018/5/29
+ * Author : Qu Huichun
+ * Modification: Created file
+ */
+
+#ifndef __HINIC_PORT_CMD_H__
+#define __HINIC_PORT_CMD_H__
+
+#ifdef __cplusplus
+ #if __cplusplus
+extern "C"{
+ #endif
+#endif /* __cplusplus */
+
+/* cmd of mgmt CPU message for NIC module */
+enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ /* not defined in base line, only for PFD and VFD */
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+ /* not defined in base line, only for PFD and VFD */
+
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
+
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN,
+
+ HINIC_PORT_CMD_SET_PFC = 0x5,
+ HINIC_PORT_CMD_GET_PFC,
+ HINIC_PORT_CMD_SET_ETS,
+ HINIC_PORT_CMD_GET_ETS,
+
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC,
+ HINIC_PORT_CMD_DEL_MAC,
+
+ HINIC_PORT_CMD_SET_RX_MODE = 0xc,
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd,
+
+ HINIC_PORT_CMD_GET_AUTONEG_CAP = 0xf,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_AUTONET_STATE,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_SPEED,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_DUPLEX,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_MEDIA_TYPE,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO,
+
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
+ HINIC_PORT_CMD_SET_LRO = 0x19,
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1a,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b,
+
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,
+ HINIC_PORT_CMD_GET_VPORT_STAT,
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL,
+
+ HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29,
+ HINIC_PORT_CMD_GET_PORT_ENABLE,
+
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_RSS_TEMP_MGR,
+
+ /* 0x36 ~ 0x40 are defined in the base line */
+
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
+
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
+ HINIC_PORT_CMD_INIT_FUNC = 0x45,
+ HINIC_PORT_CMD_SET_LLI_PRI = 0x46,
+
+ HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
+ HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE,
+
+ /* 0x4c ~ 0x57 are defined in the base line */
+ HINIC_PORT_CMD_DISABLE_PROMISIC = 0x4c,
+
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
+ HINIC_PORT_CMD_GET_BOOT_VERSION,
+ HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c,
+ HINIC_PORT_CMD_SET_VPORT_ENABLE,
+
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e,
+
+ HINIC_PORT_CMD_SET_LED_TEST = 0x5f,
+
+ HINIC_PORT_CMD_SET_LLI_STATE = 0x60,
+ HINIC_PORT_CMD_SET_LLI_TYPE,
+ HINIC_PORT_CMD_GET_LLI_CFG,
+
+ HINIC_PORT_CMD_GET_LRO = 0x63,
+
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS,
+
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
+
+ HINIC_PORT_CMD_SET_PFC_MISC = 0x67,
+ HINIC_PORT_CMD_GET_PFC_MISC,
+
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
+ HINIC_PORT_CMD_SET_VF_VLAN,
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
+ /* 0x6c, 0x6e are defined in the base line */
+ HINIC_PORT_CMD_SET_UCAPTURE_OPT = 0x6F,
+
+ HINIC_PORT_CMD_SET_TSO = 0x70,
+ HINIC_PORT_CMD_SET_PHY_POWER = 0x71,
+ HINIC_PORT_CMD_UPDATE_FW = 0x72,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
+ /* not defined in base line */
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
+ /* not defined in base line */
+ HINIC_PORT_CMD_SET_PORT_LINK_STATUS = 0x76,
+ HINIC_PORT_CMD_SET_CGE_PAUSE_TIME_CFG = 0x77,
+
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0,
+
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3,
+ HINIC_PORT_CMD_UPDATE_MAC = 0xa4,
+
+ HINIC_PORT_CMD_GET_UART_LOG = 0xa5,
+ HINIC_PORT_CMD_SET_UART_LOG,
+
+ HINIC_PORT_CMD_GET_PORT_INFO = 0xaa,
+
+ HINIC_MISC_SET_FUNC_SF_ENBITS = 0xab,
+ /* not defined in base line */
+ HINIC_MISC_GET_FUNC_SF_ENBITS,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_SFP_INFO = 0xad,
+ HINIC_PORT_CMD_GET_FW_LOG = 0xca,
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc,
+
+ HINIC_PORT_CMD_SET_IQ_ENABLE = 0xd6,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_SQ_RES = 0xDD,
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_PORT_FUNCS_STATE = 0xE7,
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+ HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
+ HINIC_PORT_CMD_SET_RXQ_LRO_ADPT = 0xFA,
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
+};
+
+/* cmd of mgmt CPU message for HW module */
+enum hinic_mgmt_cmd {
+ HINIC_MGMT_CMD_RESET_MGMT = 0x0,
+ HINIC_MGMT_CMD_START_FLR = 0x1,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2,
+ HINIC_MGMT_CMD_GET_IO_STATUS = 0x3,
+ HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4,
+
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10,
+ HINIC_MGMT_CMD_CMDQ_CTXT_GET,
+
+ HINIC_MGMT_CMD_VAT_SET = 0x12,
+ HINIC_MGMT_CMD_VAT_GET,
+
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14,
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET,
+
+ HINIC_MGMT_CMD_MQM_FIX_INFO_GET = 0x16,
+ HINIC_MGMT_CMD_MQM_CFG_INFO_SET = 0x18,
+ HINIC_MGMT_MQM_SRCH_GPA_SET = 0x20,
+ HINIC_MGMT_CMD_PPF_TMR_SET = 0x22,
+ HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23,
+ HINIC_MGMT_CMD_RES_STATE_SET = 0x24,
+ HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25,
+ HINIC_MGMT_CMD_FFM_SET = 0x26,
+ HINIC_MGMT_CMD_SMF_TMR_CLEAR = 0x27,
+ /* 0x29 not defined in base line,
+ * only used in open source driver
+ */
+ HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29,
+
+ HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32,
+
+ HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+
+ HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36,
+ HINIC_MGMT_CMD_FAULT_REPORT = 0x37,
+ HINIC_MGMT_CMD_HEART_LOST_REPORT = 0x38,
+
+ HINIC_MGMT_CMD_VPD_SET = 0x40,
+ HINIC_MGMT_CMD_VPD_GET,
+ HINIC_MGMT_CMD_LABEL_SET,
+ HINIC_MGMT_CMD_LABEL_GET,
+ HINIC_MGMT_CMD_SATIC_MAC_SET,
+ HINIC_MGMT_CMD_SATIC_MAC_GET,
+ HINIC_MGMT_CMD_SYNC_TIME = 0x46,
+
+ HINIC_MGMT_CMD_REG_READ = 0x48,
+
+ HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A,
+ HINIC_MGMT_CMD_L2NIC_RESET = 0x4b,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d,
+ HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E,
+ HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F,
+ HINIC_MGMT_CMD_PAGESIZE_SET = 0x50,
+ HINIC_MGMT_CMD_PAGESIZE_GET = 0x51,
+ HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52,
+ HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56,
+ HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61,
+ HINIC_MGMT_CMD_GET_PPF_STATE = 0x63,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65,
+ HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66,
+
+ HINIC_MGMT_CMD_GET_HOST_INFO = 0x67,
+
+ HINIC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A,
+ HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED = 0x6B,
+ HINIC_MGMT_HEARTBEAT_EVENT = 0x6C,
+ HINIC_MGMT_CMD_GET_HW_PF_INFOS = 0x6D,
+ HINIC_MGMT_CMD_GET_SDI_MODE = 0x6E,
+};
+
+/* uCode relates commands */
+enum hinic_ucode_cmd {
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC_UCODE_CMD_ARM_SQ,
+ HINIC_UCODE_CMD_ARM_RQ,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE,
+ HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
+};
+
+/* software cmds, vf->pf and multi-host */
+enum hinic_sw_funcs_cmd {
+ HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER = 0x0,
+ HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER = 0x1,
+ HINIC_SW_GET_SLAVE_FUNC_NIC_STATE = 0x2,
+ HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE = 0x3,
+ HINIC_SW_CMD_SEND_MSG_TO_VF = 0x4,
+};
+
+enum sq_l4offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum sq_vlan_offload_flag {
+ VLAN_OFFLOAD_DISABLE = 0,
+ VLAN_OFFLOAD_ENABLE = 1,
+};
+
+enum sq_pkt_parsed_flag {
+ PKT_NOT_PARSED = 0,
+ PKT_PARSED = 1,
+};
+
+enum sq_l3_type {
+ UNKNOWN_L3TYPE = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum sq_md_type {
+ UNKNOWN_MD_TYPE = 0,
+};
+
+enum sq_l2type {
+ ETHERNET = 0,
+};
+
+enum sq_tunnel_l4_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+#define HINIC_RSS_TYPE_VALID_SHIFT 23
+#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define HINIC_RSS_TYPE_IPV6_SHIFT 27
+#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define HINIC_RSS_TYPE_IPV4_SHIFT 29
+#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define HINIC_RSS_TYPE_SET(val, member) \
+ (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
+
+#define HINIC_RSS_TYPE_GET(val, member) \
+ (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
+
+enum hinic_speed {
+ HINIC_SPEED_10MB_LINK = 0,
+ HINIC_SPEED_100MB_LINK,
+ HINIC_SPEED_1000MB_LINK,
+ HINIC_SPEED_10GB_LINK,
+ HINIC_SPEED_25GB_LINK,
+ HINIC_SPEED_40GB_LINK,
+ HINIC_SPEED_100GB_LINK,
+ HINIC_SPEED_UNKNOWN = 0xFF,
+};
+
+/* In order to adapt to different Linux versions */
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0
+#define HINIC_AF0_P2P_IDX_SHIFT 10
+#define HINIC_AF0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_AF0_VF_IN_PF_SHIFT 16
+#define HINIC_AF0_FUNC_TYPE_SHIFT 24
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF
+#define HINIC_AF0_P2P_IDX_MASK 0xF
+#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_AF0_VF_IN_PF_MASK 0xFF
+#define HINIC_AF0_FUNC_TYPE_MASK 0x1
+
+#define HINIC_AF0_GET(val, member) \
+ (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK)
+
+#define HINIC_AF1_PPF_IDX_SHIFT 0
+#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8
+#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12
+#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24
+#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31
+
+#define HINIC_AF1_PPF_IDX_MASK 0x1F
+#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3
+#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7
+#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7
+#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1
+
+#define HINIC_AF1_GET(val, member) \
+ (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK)
+
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_AF2_GET(val, member) \
+ (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK)
+
+#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0
+#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1
+#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1
+#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1
+
+#define HINIC_AF4_GET(val, member) \
+ (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK)
+
+#define HINIC_AF4_SET(val, member) \
+ (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT)
+
+#define HINIC_AF4_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF4_##member##_MASK << \
+ HINIC_AF4_##member##_SHIFT)))
+
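+/* Example (illustrative): rewriting only the DOORBELL_CTRL field of
+ * attribute register 4, clearing the old value first (see
+ * enum hinic_doorbell_ctrl below):
+ *
+ * attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ * attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
+ */
+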
+#define HINIC_AF5_PF_STATUS_SHIFT 0
+#define HINIC_AF5_PF_STATUS_MASK 0xFFFF
+
+#define HINIC_AF5_SET(val, member) \
+ (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT)
+
+#define HINIC_AF5_GET(val, member) \
+ (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK)
+
+#define HINIC_AF5_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF5_##member##_MASK << \
+ HINIC_AF5_##member##_SHIFT)))
+
+#define HINIC_PPF_ELECTION_IDX_SHIFT 0
+
+#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_PPF_ELECTION_SET(val, member) \
+ (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \
+ HINIC_PPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_PPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
+ HINIC_PPF_ELECTION_##member##_MASK)
+
+#define HINIC_PPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
+ << HINIC_PPF_ELECTION_##member##_SHIFT)))
+
+#define HINIC_MPF_ELECTION_IDX_SHIFT 0
+
+#define HINIC_MPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_MPF_ELECTION_SET(val, member) \
+ (((val) & HINIC_MPF_ELECTION_##member##_MASK) << \
+ HINIC_MPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_MPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_MPF_ELECTION_##member##_SHIFT) & \
+ HINIC_MPF_ELECTION_##member##_MASK)
+
+#define HINIC_MPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_MPF_ELECTION_##member##_MASK \
+ << HINIC_MPF_ELECTION_##member##_SHIFT)))
+
+#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
+#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
+#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
+#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx)
+#define HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
+#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx)
+
+#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type)
+#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF)
+#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF)
+
+#define DB_IDX(db, db_base) \
+ ((u32)(((ulong)(db) - (ulong)(db_base)) / \
+ HINIC_DB_PAGE_SIZE))
+
+enum hinic_pcie_nosnoop {
+ HINIC_PCIE_SNOOP = 0,
+ HINIC_PCIE_NO_SNOOP = 1,
+};
+
+enum hinic_pcie_tph {
+ HINIC_PCIE_TPH_DISABLE = 0,
+ HINIC_PCIE_TPH_ENABLE = 1,
+};
+
+enum hinic_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic_doorbell_ctrl {
+ ENABLE_DOORBELL = 0x0,
+ DISABLE_DOORBELL = 0x1,
+};
+
+enum hinic_pf_status {
+ HINIC_PF_STATUS_INIT = 0x0,
+ HINIC_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HINIC_PF_STATUS_FLR_START_FLAG = 0x12,
+ HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
+/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */
+#define HINIC_DB_DWQE_SIZE 0x00080000
+
+/* db/dwqe page size: 4K */
+#define HINIC_DB_PAGE_SIZE 0x00001000ULL
+
+#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE)
+
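+/* With the two sizes above: 0x80000 / 0x1000 = 128 doorbell/dwqe
+ * pages, matching the "db num: 128, dwqe: 128" note.
+ */
+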
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
+#ifdef __cplusplus
+ #if __cplusplus
+}
+ #endif
+#endif /* __cplusplus */
+#endif /* __HINIC_PORT_CMD_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
new file mode 100644
index 000000000000..83c0cd7fccdb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_QE_DEF_H__
+#define __HINIC_QE_DEF_H__
+
+#ifdef __cplusplus
+ #if __cplusplus
+extern "C"{
+ #endif
+#endif /* __cplusplus */
+
+#define HINIC_SQ_WQEBB_SIZE 64
+#define HINIC_RQ_WQE_SIZE 32
+#define HINIC_SQ_WQEBB_SHIFT 6
+#define HINIC_RQ_WQEBB_SHIFT 5
+
+#define HINIC_MAX_QUEUE_DEPTH 4096
+#define HINIC_MIN_QUEUE_DEPTH 128
+#define HINIC_TXD_ALIGN 1
+#define HINIC_RXD_ALIGN 1
+
+#define HINIC_SQ_DEPTH 1024
+#define HINIC_RQ_DEPTH 1024
+
+#define HINIC_RQ_WQE_MAX_SIZE 32
+
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3) /*lint !e767*/
+
+/************** SQ_CTRL ***************/
+#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define SQ_CTRL_LEN_SHIFT 29
+#define SQ_CTRL_OWNER_SHIFT 31
+
+#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU
+#define SQ_CTRL_DATA_FORMAT_MASK 0x1U
+#define SQ_CTRL_LEN_MASK 0x3U
+#define SQ_CTRL_OWNER_MASK 0x1U
+
+#define SQ_CTRL_GET(val, member) (((val) >> SQ_CTRL_##member##_SHIFT) \
+ & SQ_CTRL_##member##_MASK)
+
+#define SQ_CTRL_CLEAR(val, member) ((val) & \
+ (~(SQ_CTRL_##member##_MASK << \
+ SQ_CTRL_##member##_SHIFT)))
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU
+#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) \
+ << SQ_CTRL_QUEUE_INFO_##member##_SHIFT)
+
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) \
+ & SQ_CTRL_QUEUE_INFO_##member##_MASK)
+
+#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \
+ ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \
+ SQ_CTRL_QUEUE_INFO_##member##_SHIFT)))
+
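+/* Example (illustrative): building the queue_info dword of a send WQE
+ * for a TSO packet with MSS 1460 and L4 checksum offload:
+ *
+ * u32 queue_info = SQ_CTRL_QUEUE_INFO_SET(1, TSO) |
+ *                  SQ_CTRL_QUEUE_INFO_SET(1, TCPUDP_CS) |
+ *                  SQ_CTRL_QUEUE_INFO_SET(1460, MSS);
+ *
+ * SQ_CTRL_QUEUE_INFO_GET(queue_info, MSS) then yields 1460 again.
+ */
+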
+#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
+#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8
+#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
+#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
+#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14
+#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15
+#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
+
+#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU
+#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U
+#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U
+#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U
+#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U
+#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U
+#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU
+
+#define SQ_TASK_INFO0_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \
+ SQ_TASK_INFO0_##member##_SHIFT)
+#define SQ_TASK_INFO0_GET(val, member) \
+ (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \
+ SQ_TASK_INFO0_##member##_MASK)
+
+#define SQ_TASK_INFO1_MD_TYPE_SHIFT 8
+#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
+
+#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU
+
+#define SQ_TASK_INFO1_SET(val, member) \
+ (((val) & SQ_TASK_INFO1_##member##_MASK) << \
+ SQ_TASK_INFO1_##member##_SHIFT)
+#define SQ_TASK_INFO1_GET(val, member) \
+ (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \
+ SQ_TASK_INFO1_##member##_MASK)
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
+#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U
+#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U
+
+#define SQ_TASK_INFO2_SET(val, member) \
+ (((val) & SQ_TASK_INFO2_##member##_MASK) << \
+ SQ_TASK_INFO2_##member##_SHIFT)
+#define SQ_TASK_INFO2_GET(val, member) \
+ (((val) >> SQ_TASK_INFO2_##member##_SHIFT) & \
+ SQ_TASK_INFO2_##member##_MASK)
+
+#define SQ_TASK_INFO4_L2TYPE_SHIFT 31
+
+#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U
+
+#define SQ_TASK_INFO4_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \
+ SQ_TASK_INFO4_##member##_SHIFT)
+
+/********************* SQ_DB *********************/
+#define SQ_DB_OFF 0x00000800
+#define SQ_DB_INFO_HI_PI_SHIFT 0
+#define SQ_DB_INFO_QID_SHIFT 8
+#define SQ_DB_INFO_CFLAG_SHIFT 23
+#define SQ_DB_INFO_COS_SHIFT 24
+#define SQ_DB_INFO_TYPE_SHIFT 27
+#define SQ_DB_INFO_HI_PI_MASK 0xFFU
+#define SQ_DB_INFO_QID_MASK 0x3FFU
+#define SQ_DB_INFO_CFLAG_MASK 0x1U
+#define SQ_DB_INFO_COS_MASK 0x7U
+#define SQ_DB_INFO_TYPE_MASK 0x1FU
+#define SQ_DB_INFO_SET(val, member) (((u32)(val) & \
+ SQ_DB_INFO_##member##_MASK) << \
+ SQ_DB_INFO_##member##_SHIFT)
+
+#define SQ_DB_PI_LOW_MASK 0xFF
+#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK)
+#define SQ_DB_PI_HI_SHIFT 8
+#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT)
+#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_addr + SQ_DB_OFF) + \
+ SQ_DB_PI_LOW(pi))
+#define SQ_DB 1
+#define SQ_CFLAG_DP 0
+
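+/* Example (illustrative): composing the doorbell dword for SQ 5 at
+ * producer index 0x1234 with CoS 3. The low PI byte selects the
+ * doorbell address via SQ_DB_ADDR(); the remaining bits go in HI_PI:
+ *
+ * u16 pi = 0x1234;
+ * u32 db_info = SQ_DB_INFO_SET(SQ_DB_PI_HIGH(pi), HI_PI) |
+ *               SQ_DB_INFO_SET(SQ_DB, TYPE) |
+ *               SQ_DB_INFO_SET(SQ_CFLAG_DP, CFLAG) |
+ *               SQ_DB_INFO_SET(3, COS) |
+ *               SQ_DB_INFO_SET(5, QID);
+ */
+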
+/*********************** RQ_CTRL ******************/
+#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
+#define RQ_CTRL_COMPLETE_LEN_SHIFT 27
+#define RQ_CTRL_LEN_SHIFT 29
+
+#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U
+#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U
+#define RQ_CTRL_LEN_MASK 0x3U
+
+#define RQ_CTRL_SET(val, member) (((val) & \
+ RQ_CTRL_##member##_MASK) << \
+ RQ_CTRL_##member##_SHIFT)
+
+#define RQ_CTRL_GET(val, member) (((val) >> \
+ RQ_CTRL_##member##_SHIFT) & \
+ RQ_CTRL_##member##_MASK)
+
+#define RQ_CTRL_CLEAR(val, member) ((val) & \
+ (~(RQ_CTRL_##member##_MASK << \
+ RQ_CTRL_##member##_SHIFT)))
+
+#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25
+#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26
+#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27
+
+#define RQ_CQE_STATUS_BP_EN_SHIFT 30
+#define RQ_CQE_STATUS_RXDONE_SHIFT 31
+#define RQ_CQE_STATUS_FLUSH_SHIFT 28
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+#define RQ_CQE_STATUS_LRO_PUSH_MASK 0x1U
+#define RQ_CQE_STATUS_LRO_ENTER_MASK 0x1U
+#define RQ_CQE_STATUS_LRO_INTR_MASK 0x1U
+#define RQ_CQE_STATUS_BP_EN_MASK 0x1U
+#define RQ_CQE_STATUS_RXDONE_MASK 0x1U
+#define RQ_CQE_STATUS_FLUSH_MASK 0x1U
+
+#define RQ_CQE_STATUS_GET(val, member) (((val) >> \
+ RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define RQ_CQE_STATUS_CLEAR(val, member) ((val) & \
+ (~(RQ_CQE_STATUS_##member##_MASK << \
+ RQ_CQE_STATUS_##member##_SHIFT)))
+
+#define RQ_CQE_SGE_VLAN_SHIFT 0
+#define RQ_CQE_SGE_LEN_SHIFT 16
+
+#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
+#define RQ_CQE_SGE_LEN_MASK 0xFFFFU
+
+#define RQ_CQE_SGE_GET(val, member) (((val) >> \
+ RQ_CQE_SGE_##member##_SHIFT) & \
+ RQ_CQE_SGE_##member##_MASK)
+
+#define RQ_CQE_PKT_NUM_SHIFT 1
+#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19
+#define RQ_CQE_PKT_LAST_LEN_SHIFT 6
+#define RQ_CQE_SUPER_CQE_EN_SHIFT 0
+
+#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_NUM_MASK 0x1FU
+#define RQ_CQE_SUPER_CQE_EN_MASK 0x1
+
+#define RQ_CQE_PKT_NUM_GET(val, member) (((val) >> \
+ RQ_CQE_PKT_##member##_SHIFT) & \
+ RQ_CQE_PKT_##member##_MASK)
+#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM)
+
+#define RQ_CQE_SUPER_CQE_EN_GET(val, member) (((val) >> \
+ RQ_CQE_##member##_SHIFT) & \
+ RQ_CQE_##member##_MASK)
+#define HINIC_GET_SUPER_CQE_EN(pkt_info) \
+ RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)
+
+#define HINIC_GET_SUPER_CQE_EN_BE(pkt_info) ((pkt_info) & 0x1000000U)
+#define RQ_CQE_PKT_LEN_GET(val, member) (((val) >> \
+ RQ_CQE_PKT_##member##_SHIFT) & \
+ RQ_CQE_PKT_##member##_MASK)
+
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U
+
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \
+ RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
+ RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
+
+#define RQ_CQE_PKT_TYPES_NON_L2_MASK 0x800U
+#define RQ_CQE_PKT_TYPES_L2_MASK 0x7FU
+
+#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80
+#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x31U
+#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x4EU
+
+#define SECT_SIZE_BYTES(size) ((size) << 3)
+
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+
+#define WQS_BLOCKS_PER_PAGE 4
+
+#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)
+
+#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
+ ((wq)->num_q_pages - 1))
+
+#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
+ ((idx) & ((wq)->num_wqebbs_per_page - 1)))
+
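+/* Worked example (illustrative): for a WQ with 64B WQEBBs, 256 WQEBBs
+ * per page (wqebbs_per_page_shift == 8) and 4 queue pages, index 300
+ * maps to page (300 >> 8) & 3 == 1, at byte offset
+ * 64 * (300 & 255) == 2816 within that page.
+ */
+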
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_PAGE_ADDR_SIZE_SHIFT 3
+#define WQ_PAGE_ADDR(wq, idx) \
+ (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
+ (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))
+
+#define WQ_BLOCK_SIZE 4096UL
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
+
+#define CMDQ_BLOCKS_PER_PAGE 8
+#define CMDQ_BLOCK_SIZE 512UL
+#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
+ CMDQ_BLOCK_SIZE), PAGE_SIZE)
+
+#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff))
+#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff))
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+ (((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+
+#define WQE_SHADOW_PAGE(wq, wqe) \
+ (u16)(((ulong)(wqe) - (ulong)(wq)->shadow_wqe) \
+ / (wq)->max_wqe_size)
+
+#define WQE_IN_RANGE(wqe, start, end) \
+ (((ulong)(wqe) >= (ulong)(start)) && \
+ ((ulong)(wqe) < (ulong)(end)))
+
+#define WQ_NUM_PAGES(num_wqs) \
+ (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
+
+/* QE buffer related defines */
+enum hinic_rx_buf_size {
+ HINIC_RX_BUF_SIZE_32B = 0x20,
+ HINIC_RX_BUF_SIZE_64B = 0x40,
+ HINIC_RX_BUF_SIZE_96B = 0x60,
+ HINIC_RX_BUF_SIZE_128B = 0x80,
+ HINIC_RX_BUF_SIZE_192B = 0xC0,
+ HINIC_RX_BUF_SIZE_256B = 0x100,
+ HINIC_RX_BUF_SIZE_384B = 0x180,
+ HINIC_RX_BUF_SIZE_512B = 0x200,
+ HINIC_RX_BUF_SIZE_768B = 0x300,
+ HINIC_RX_BUF_SIZE_1K = 0x400,
+ HINIC_RX_BUF_SIZE_1_5K = 0x600,
+ HINIC_RX_BUF_SIZE_2K = 0x800,
+ HINIC_RX_BUF_SIZE_3K = 0xC00,
+ HINIC_RX_BUF_SIZE_4K = 0x1000,
+ HINIC_RX_BUF_SIZE_8K = 0x2000,
+ HINIC_RX_BUF_SIZE_16K = 0x4000,
+};
+
+enum ppf_tmr_status {
+ HINIC_PPF_TMR_FLAG_STOP,
+ HINIC_PPF_TMR_FLAG_START,
+};
+
+enum hinic_res_state {
+ HINIC_RES_CLEAN = 0,
+ HINIC_RES_ACTIVE = 1,
+};
+
+#define DEFAULT_RX_BUF_SIZE ((u16)0xB)
+
+#define BUF_DESC_SIZE_SHIFT 4
+
+#define HINIC_SQ_WQE_SIZE(num_sge) \
+ (sizeof(struct hinic_sq_ctrl) + \
+ sizeof(struct hinic_sq_task) + \
+ (u32)((num_sge) << BUF_DESC_SIZE_SHIFT))
+
+#define HINIC_SQ_WQEBB_CNT(num_sge) \
+ (int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \
+ HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT)
+
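+/* Worked example (illustrative): assuming the control and task
+ * sections together take 32B (their real sizes come from the queue
+ * structures), a 2-SGE WQE is 32 + (2 << 4) = 64B, so
+ * HINIC_SQ_WQEBB_CNT(2) == ALIGN(64, 64) >> 6 == 1 WQEBB.
+ */
+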
+#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
+
+#define HINIC_GET_RSS_TYPES(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)
+
+#define HINIC_GET_PKT_TYPES(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)
+
+#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, VLAN)
+
+#define HINIC_GET_RX_PKT_LEN(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, LEN)
+
+#define HINIC_GET_RX_CSUM_ERR(status) \
+ RQ_CQE_STATUS_GET(status, CSUM_ERR)
+
+#define HINIC_GET_RX_DONE(status) \
+ RQ_CQE_STATUS_GET(status, RXDONE)
+
+#define HINIC_GET_RX_FLUSH(status) \
+ RQ_CQE_STATUS_GET(status, FLUSH)
+
+#define HINIC_GET_RX_BP_EN(status) \
+ RQ_CQE_STATUS_GET(status, BP_EN)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+#define HINIC_PKT_TYPES_UNKNOWN(pkt_types) \
+ ((pkt_types) & RQ_CQE_PKT_TYPES_NON_L2_MASK)
+
+#define HINIC_PKT_TYPES_L2(pkt_types) \
+ ((pkt_types) & RQ_CQE_PKT_TYPES_L2_MASK)
+
+#define HINIC_CSUM_ERR_BYPASSED(csum_err) \
+ ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL)
+
+#define HINIC_CSUM_ERR_IP(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK)
+
+#define HINIC_CSUM_ERR_L4(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK)
+
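+/* Example (illustrative) receive-path use of the checksum helpers
+ * above; a hardware bypass means the checksum was never checked and
+ * must not be reported as an error:
+ *
+ * u32 csum_err = HINIC_GET_RX_CSUM_ERR(status);
+ *
+ * if (csum_err && !HINIC_CSUM_ERR_BYPASSED(csum_err) &&
+ *     (HINIC_CSUM_ERR_IP(csum_err) || HINIC_CSUM_ERR_L4(csum_err)))
+ *         skb->ip_summed = CHECKSUM_NONE;
+ */
+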
+#define TX_MSS_DEFAULT 0x3E00
+#define TX_MSS_MIN 0x50
+
+enum sq_wqe_type {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum rq_completion_fmt {
+ RQ_COMPLETE_SGE = 1
+};
+
+#define HINIC_VLAN_FILTER_EN (1U << 0)
+#define HINIC_BROADCAST_FILTER_EX_EN (1U << 1)
+
+#ifdef __cplusplus
+ #if __cplusplus
+}
+ #endif
+#endif /* __cplusplus */
+#endif /* __HINIC_QE_DEF_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
new file mode 100644
index 000000000000..091bcccd9c75
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CHIPIF_SM_LT_H__
+#define __CHIPIF_SM_LT_H__
+
+#define SM_LT_LOAD (0x12)
+#define SM_LT_STORE (0x14)
+
+#define SM_LT_NUM_OFFSET 13
+#define SM_LT_ABUF_FLG_OFFSET 12
+#define SM_LT_BC_OFFSET 11
+
+#define SM_LT_ENTRY_16B 16
+#define SM_LT_ENTRY_32B 32
+#define SM_LT_ENTRY_48B 48
+#define SM_LT_ENTRY_64B 64
+
+#define TBL_LT_OFFSET_DEFAULT 0
+
+#define SM_CACHE_LINE_SHFT 4 /* log2(16) */
+#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */
+
+#define MAX_SM_LT_READ_LINE_NUM 4
+#define MAX_SM_LT_WRITE_LINE_NUM 3
+
+#define SM_LT_FULL_BYTEENB 0xFFFF
+
+#define TBL_GET_ENB3_MASK(bitmask) (u16)(((bitmask) >> 32) & 0xFFFF)
+#define TBL_GET_ENB2_MASK(bitmask) (u16)(((bitmask) >> 16) & 0xFFFF)
+#define TBL_GET_ENB1_MASK(bitmask) (u16)((bitmask) & 0xFFFF)
+
+enum {
+ SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */
+ SM_LT_NUM_1, /* lt num = 1, load/store 32B */
+ SM_LT_NUM_2, /* lt num = 2, load/store 48B */
+ SM_LT_NUM_3 /* lt num = 3, load 64B */
+};
+
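+/* The num field selects how many extra 16B cache lines follow the
+ * first one: accessing a 48B entry (SM_LT_ENTRY_48B) uses SM_LT_NUM_2,
+ * i.e. 3 cache lines of SM_CACHE_LINE_SIZE bytes in total.
+ */
+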
+/* lt load request */
+typedef union {
+ struct {
+ u32 offset:8;
+ u32 pad:3;
+ u32 bc:1;
+ u32 abuf_flg:1;
+ u32 num:2;
+ u32 ack:1;
+ u32 op_id:5;
+ u32 instance:6;
+ u32 src:5;
+ } bs;
+
+ u32 value;
+} sml_lt_req_head_u;
+
+typedef struct {
+ u32 extra;
+ sml_lt_req_head_u head;
+ u32 index;
+ u32 pad0;
+ u32 pad1;
+} sml_lt_load_req_s;
+
+typedef struct {
+ u32 extra;
+ sml_lt_req_head_u head;
+ u32 index;
+ u32 byte_enb[2];
+ u8 write_data[48];
+} sml_lt_store_req_s;
+
+enum {
+ SM_LT_OFFSET_1 = 1,
+ SM_LT_OFFSET_2,
+ SM_LT_OFFSET_3,
+ SM_LT_OFFSET_4,
+ SM_LT_OFFSET_5,
+ SM_LT_OFFSET_6,
+ SM_LT_OFFSET_7,
+ SM_LT_OFFSET_8,
+ SM_LT_OFFSET_9,
+ SM_LT_OFFSET_10,
+ SM_LT_OFFSET_11,
+ SM_LT_OFFSET_12,
+ SM_LT_OFFSET_13,
+ SM_LT_OFFSET_14,
+ SM_LT_OFFSET_15
+};
+
+static inline void sml_lt_store_memcpy(u32 *dst, u32 *src, u8 num)
+{
+ switch (num) {
+ case SM_LT_NUM_2:
+ *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11);
+ *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10);
+ *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9);
+ *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8);
+ /*lint -fallthrough*/
+ case SM_LT_NUM_1:
+ *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7);
+ *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6);
+ *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5);
+ *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4);
+ /*lint -fallthrough*/
+ case SM_LT_NUM_0:
+ *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3);
+ *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2);
+ *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1);
+ *dst = *src;
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void sml_lt_load_memcpy(u32 *dst, u32 *src, u8 num)
+{
+ switch (num) {
+ case SM_LT_NUM_3:
+ *(dst + SM_LT_OFFSET_15) = *(src + SM_LT_OFFSET_15);
+ *(dst + SM_LT_OFFSET_14) = *(src + SM_LT_OFFSET_14);
+ *(dst + SM_LT_OFFSET_13) = *(src + SM_LT_OFFSET_13);
+ *(dst + SM_LT_OFFSET_12) = *(src + SM_LT_OFFSET_12);
+ /*lint -fallthrough*/
+ case SM_LT_NUM_2:
+ *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11);
+ *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10);
+ *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9);
+ *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8);
+ /*lint -fallthrough*/
+ case SM_LT_NUM_1:
+ *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7);
+ *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6);
+ *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5);
+ *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4);
+ /*lint -fallthrough*/
+ case SM_LT_NUM_0:
+ *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3);
+ *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2);
+ *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1);
+ *dst = *src;
+ break;
+ default:
+ break;
+ }
+}
+
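+/* Both copy helpers above rely on the case fall-through: a request
+ * with lt num == n copies (n + 1) * 16 bytes, e.g. sml_lt_store_memcpy
+ * with SM_LT_NUM_2 copies u32 offsets 0..11, i.e. 48 bytes.
+ */
+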
+enum HINIC_CSR_API_DATA_OPERATION_ID {
+ HINIC_CSR_OPERATION_WRITE_CSR = 0x1E,
+ HINIC_CSR_OPERATION_READ_CSR = 0x1F
+};
+
+enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA {
+ HINIC_CSR_NO_RESP_DATA = 0,
+ HINIC_CSR_NEED_RESP_DATA = 1
+};
+
+enum HINIC_CSR_API_DATA_DATA_SIZE {
+ HINIC_CSR_DATA_SZ_32 = 0,
+ HINIC_CSR_DATA_SZ_64 = 1
+};
+
+struct hinic_csr_request_api_data {
+ u32 dw0;
+
+ union {
+ struct {
+ u32 reserved1:13;
+ /* this field indicates the write/read data size:
+ * 2'b00: 32 bits
+ * 2'b01: 64 bits
+ * 2'b10~2'b11: reserved
+ */
+ u32 data_size:2;
+ /* this field indicates whether the requestor expects to
+ * receive response data.
+ * 1'b0: no response data expected.
+ * 1'b1: response data expected.
+ */
+ u32 need_response:1;
+ /* this field indicates the operation that the requestor
+ * expects.
+ * 5'b1_1110: write value to csr space.
+ * 5'b1_1111: read register from csr space.
+ */
+ u32 operation_id:5;
+ u32 reserved2:6;
+ /* this field specifies the Src node ID for this API
+ * request message.
+ */
+ u32 src_node_id:5;
+ } bits;
+
+ u32 val32;
+ } dw1;
+
+ union {
+ struct {
+ /* it specifies the CSR address. */
+ u32 csr_addr:26;
+ u32 reserved3:6;
+ } bits;
+
+ u32 val32;
+ } dw2;
+
+ /* if data_size = 2'b01, this is the high 32 bits of the write
+ * data; otherwise it is 32'hFFFF_FFFF.
+ */
+ u32 csr_write_data_h;
+ /* the low 32 bits of write data. */
+ u32 csr_write_data_l;
+};
+
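+/* Example (illustrative): filling the request for a 32-bit CSR read of
+ * address addr. Each dword is converted to big endian before it goes
+ * out on the API command channel; see hinic_api_csr_rd32() for a real
+ * user of this layout:
+ *
+ * struct hinic_csr_request_api_data req = {0};
+ *
+ * req.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR;
+ * req.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA;
+ * req.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32;
+ * req.dw1.val32 = cpu_to_be32(req.dw1.val32);
+ * req.dw2.bits.csr_addr = addr;
+ * req.dw2.val32 = cpu_to_be32(req.dw2.val32);
+ */
+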
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
new file mode 100644
index 000000000000..7e1814520e43
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_sml_counter.h"
+
+#ifndef HTONL
+#define HTONL(x) \
+ ((((x) & 0x000000ff) << 24) \
+ | (((x) & 0x0000ff00) << 8) \
+ | (((x) & 0x00ff0000) >> 8) \
+ | (((x) & 0xff000000) >> 24))
+#endif
+
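+/* HTONL() swaps byte order unconditionally, e.g.
+ * HTONL(0x11223344) == 0x44332211; unlike htonl() it swaps even on
+ * big-endian hosts.
+ */
+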
+static void sml_ctr_htonl_n(u32 *node, u32 ullen)
+{
+ u32 i;
+
+ for (i = 0; i < ullen; i++) {
+ *node = HTONL(*node);
+ node++;
+ }
+}
+
+static void hinic_sml_ctr_read_build_req(chipif_sml_ctr_rd_req_s *msg,
+ u8 instance_id, u8 op_id,
+ u8 ack, u32 ctr_id, u32 init_val)
+{
+ msg->head.value = 0;
+ msg->head.bs.instance = instance_id;
+ msg->head.bs.op_id = op_id;
+ msg->head.bs.ack = ack;
+ msg->head.value = HTONL(msg->head.value);
+
+ msg->ctr_id = ctr_id;
+ msg->ctr_id = HTONL(msg->ctr_id);
+
+ msg->initial = init_val;
+}
+
+static void hinic_sml_ctr_write_build_req(chipif_sml_ctr_wr_req_s *msg,
+ u8 instance_id, u8 op_id,
+ u8 ack, u32 ctr_id,
+ u64 val1, u64 val2)
+{
+ msg->head.value = 0;
+ msg->head.bs.instance = instance_id;
+ msg->head.bs.op_id = op_id;
+ msg->head.bs.ack = ack;
+ msg->head.value = HTONL(msg->head.value);
+
+ msg->ctr_id = ctr_id;
+ msg->ctr_id = HTONL(msg->ctr_id);
+
+ msg->value1_h = val1 >> 32;
+ msg->value1_l = val1 & 0xFFFFFFFF;
+
+ msg->value2_h = val2 >> 32;
+ msg->value2_l = val2 & 0xFFFFFFFF;
+}
+
+/**
+ * hinic_sm_ctr_rd32 - small single 32 counter read
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value)
+{
+ chipif_sml_ctr_rd_req_s req;
+ ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req),
+ (void *)&rsp, (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Sm 32bit counter read fail, err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = rsp.bs_ss32_rsp.value1;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_sm_ctr_rd32);
+
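+/* Example (illustrative) caller; the node/instance ids here are only
+ * placeholders taken from hinic_sml_table.h, and valid counter ids are
+ * chip specific:
+ *
+ * u32 val;
+ * int err = hinic_sm_ctr_rd32(hwdev, TBL_ID_CTR_DFX_S32_SM_NODE,
+ *                             TBL_ID_CTR_DFX_S32_SM_INST, 0, &val);
+ */
+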
+/**
+ * hinic_sm_ctr_rd32_clear - small single 32 counter read and clear to zero
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ * according to the ACN error codes (ERR_OK, ERR_PARAM, ERR_FAILED, etc.)
+ **/
+int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value)
+{
+ chipif_sml_ctr_rd_req_s req;
+ ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hinic_sml_ctr_read_build_req(&req, instance,
+ CHIPIF_SM_CTR_OP_READ_CLEAR,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req),
+ (void *)&rsp, (unsigned short)sizeof(rsp));
+
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Sm 32bit counter clear fail, err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = rsp.bs_ss32_rsp.value1;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_sm_ctr_rd32_clear);
+
+/**
+ * hinic_sm_ctr_wr32 - small single 32 counter write
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value)
+{
+ chipif_sml_ctr_wr_req_s req;
+ chipif_sml_ctr_wr_rsp_s rsp;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, (u64)value, 0ULL);
+
+ return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
+
+/**
+ * hinic_sm_ctr_rd64 - big counter 64 read
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value)
+{
+ chipif_sml_ctr_rd_req_s req;
+ ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || !value)
+ return -EFAULT;
+
+ hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Sm 64bit counter read fail err(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2;
+
+ return 0;
+}
+
+/**
+ * hinic_sm_ctr_wr64 - big single 64 counter write
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value)
+{
+ chipif_sml_ctr_wr_req_s req;
+ chipif_sml_ctr_wr_rsp_s rsp;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, value, 0ULL);
+
+ return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
+
+/**
+ * hinic_sm_ctr_rd64_pair - big pair 128 counter read
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value1: read counter value ptr
+ * @value2: read counter value ptr
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 *value1, u64 *value2)
+{
+ chipif_sml_ctr_rd_req_s req;
+ ctr_rd_rsp_u rsp;
+ int ret;
+
+ if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
+ pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd
number\n",
+ hwdev, value1, value2, ctr_id);
+ return -EFAULT;
+ }
+
+ hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+ CHIPIF_ACK, ctr_id, 0);
+
+ ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Sm 64 bit rd pair ret(%d)\n", ret);
+ return ret;
+ }
+ sml_ctr_htonl_n((u32 *)&rsp, 4);
+ *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l;
+ *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l;
+
+ return 0;
+}
+
+/**
+ * hinic_sm_ctr_wr64_pair - big pair 128 counter write
+ * @hwdev: the hardware device
+ * @node: the node id
+ * @instance: instance id
+ * @ctr_id: counter id
+ * @value1: write counter value
+ * @value2: write counter value
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 value1, u64 value2)
+{
+ chipif_sml_ctr_wr_req_s req;
+ chipif_sml_ctr_wr_rsp_s rsp;
+
+ /* pair pattern: ctr_id must be an even number */
+ if (!hwdev || (0 != (ctr_id & 0x1))) {
+ pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n",
+ ctr_id);
+ return -EFAULT;
+ }
+
+ hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
+ CHIPIF_NOACK, ctr_id, value1, value2);
+ return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+ (unsigned short)sizeof(req), (void *)&rsp,
+ (unsigned short)sizeof(rsp));
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
new file mode 100644
index 000000000000..cf0e9e863b8f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CHIPIF_SML_COUNTER_H__
+#define __CHIPIF_SML_COUNTER_H__
+
+#define CHIPIF_FUNC_PF 0
+#define CHIPIF_FUNC_VF 1
+#define CHIPIF_FUNC_PPF 2
+
+#define CHIPIF_ACK 1
+#define CHIPIF_NOACK 0
+
+#define CHIPIF_SM_CTR_OP_READ 0x2
+#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6
+#define CHIPIF_SM_CTR_OP_WRITE 0x3
+
+#define SMALL_CNT_READ_RSP_SIZE 16
+
+/* request head */
+typedef union {
+ struct {
+ u32 pad:15;
+ u32 ack:1;
+ u32 op_id:5;
+ u32 instance:6;
+ u32 src:5;
+ } bs;
+
+ u32 value;
+} chipif_sml_ctr_req_head_u;
+/* counter read request struct */
+typedef struct {
+ u32 extra;
+ chipif_sml_ctr_req_head_u head;
+ u32 ctr_id;
+ u32 initial;
+ u32 pad;
+} chipif_sml_ctr_rd_req_s;
+
+/* counter read response union */
+typedef union {
+ struct {
+ u32 value1:16;
+ u32 pad0:16;
+ u32 pad1[3];
+ } bs_ss16_rsp;
+
+ struct {
+ u32 value1;
+ u32 pad[3];
+ } bs_ss32_rsp;
+
+ struct {
+ u32 value1:20;
+ u32 pad0:12;
+ u32 value2:12;
+ u32 pad1:20;
+ u32 pad2[2];
+ } bs_sp_rsp;
+
+ struct {
+ u32 value1;
+ u32 value2;
+ u32 pad[2];
+ } bs_bs64_rsp;
+
+ struct {
+ u32 val1_h;
+ u32 val1_l;
+ u32 val2_h;
+ u32 val2_l;
+ } bs_bp64_rsp;
+
+} ctr_rd_rsp_u;
+
+/* response head */
+typedef union {
+ struct {
+ u32 pad:30; /* reserved */
+ u32 code:2; /* error code */
+ } bs;
+
+ u32 value;
+} sml_ctr_rsp_head_u;
+
+/* counter write request struct */
+typedef struct {
+ u32 extra;
+ chipif_sml_ctr_req_head_u head;
+ u32 ctr_id;
+ u32 rsv1;
+ u32 rsv2;
+ u32 value1_h;
+ u32 value1_l;
+ u32 value2_h;
+ u32 value2_l;
+} chipif_sml_ctr_wr_req_s;
+
+/* counter write response struct */
+typedef struct {
+ sml_ctr_rsp_head_u head;
+ u32 pad[3];
+} chipif_sml_ctr_wr_rsp_s;
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
new file mode 100644
index 000000000000..246ceead5fed
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_sm_lt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_dbg.h"
+
+#define ACK 1
+#define NOACK 0
+
+#define LT_LOAD16_API_SIZE (16 + 4)
+#define LT_STORE16_API_SIZE (32 + 4)
+
+#ifndef HTONL
+#define HTONL(x) \
+ ((((x) & 0x000000ff) << 24) \
+ | (((x) & 0x0000ff00) << 8) \
+ | (((x) & 0x00ff0000) >> 8) \
+ | (((x) & 0xff000000) >> 24))
+#endif
+
+static inline void sm_lt_build_head(sml_lt_req_head_u *head,
+ u8 instance_id,
+ u8 op_id, u8 ack,
+ u8 offset, u8 num)
+{
+ head->value = 0;
+ head->bs.instance = instance_id;
+ head->bs.op_id = op_id;
+ head->bs.ack = ack;
+ head->bs.num = num;
+ head->bs.abuf_flg = 0;
+ head->bs.bc = 1;
+ head->bs.offset = offset;
+ head->value = HTONL((head->value));
+}
+
+static inline void sm_lt_load_build_req(sml_lt_load_req_s *req,
+ u8 instance_id,
+ u8 op_id, u8 ack,
+ u32 lt_index,
+ u8 offset, u8 num)
+{
+ sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num);
+ req->extra = 0;
+ req->index = lt_index;
+ req->index = HTONL(req->index);
+}
+
+static inline void sm_lt_store_build_req(sml_lt_store_req_s *req,
+ u8 instance_id,
+ u8 op_id, u8 ack,
+ u32 lt_index,
+ u8 offset,
+ u8 num,
+ u16 byte_enb3,
+ u16 byte_enb2,
+ u16 byte_enb1,
+ u8 *data)
+{
+ sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num);
+ req->index = lt_index;
+ req->index = HTONL(req->index);
+ req->extra = 0;
+ req->byte_enb[0] = (u32)(byte_enb3);
+ req->byte_enb[0] = HTONL(req->byte_enb[0]);
+ req->byte_enb[1] = HTONL((((u32)byte_enb2) << 16) | byte_enb1);
+ sml_lt_store_memcpy((u32 *)req->write_data, (u32 *)(void *)data, num);
+}
+
+int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data)
+{
+ sml_lt_load_req_s req;
+ int ret;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index, 0, 0);
+
+ ret = hinic_api_cmd_read_ack(hwdev, dest, &req,
+ LT_LOAD16_API_SIZE, (void *)data, 16);
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Read linear table 16byte fail, err: %d\n", ret);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_dbg_lt_rd_16byte);
+
+int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data, u16 mask)
+{
+ sml_lt_store_req_s req;
+ int ret;
+
+ if (!hwdev || !data)
+ return -EFAULT;
+
+ sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index,
+ 0, 0, 0, 0, mask, data);
+
+ ret = hinic_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE);
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Write linear table 16byte fail, err: %d\n", ret);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_dbg_lt_wr_16byte_mask);
+
+int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val)
+{
+ struct hinic_csr_request_api_data api_data = {0};
+ u32 csr_val = 0;
+ u16 in_size = sizeof(api_data);
+ int ret;
+
+ if (!hwdev || !val)
+ return -EFAULT;
+
+ memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data));
+ api_data.dw0 = 0;
+ api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR;
+ api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA;
+ api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32;
+ api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+ api_data.dw2.bits.csr_addr = addr;
+ api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+
+ ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
+ in_size, &csr_val, 4);
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n",
+ dest, addr, ret);
+ return ret;
+ }
+
+ *val = csr_val;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_rd32);
+
+int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val)
+{
+ struct hinic_csr_request_api_data api_data;
+ u16 in_size = sizeof(api_data);
+ int ret;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data));
+ api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR;
+ api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA;
+ api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32;
+ api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+ api_data.dw2.bits.csr_addr = addr;
+ api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+ api_data.csr_write_data_h = 0xffffffff;
+ api_data.csr_write_data_l = val;
+
+ ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size);
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Write 32 bit csr fail! dest %d addr 0x%x val 0x%x\n",
+ dest, addr, val);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_wr32);
+
+int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val)
+{
+ struct hinic_csr_request_api_data api_data = {0};
+ u64 csr_val = 0;
+ u16 in_size = sizeof(api_data);
+ int ret;
+
+ if (!hwdev || !val)
+ return -EFAULT;
+
+ memset(&api_data, 0, sizeof(struct hinic_csr_request_api_data));
+ api_data.dw0 = 0;
+ api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR;
+ api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA;
+ api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64;
+ api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+ api_data.dw2.bits.csr_addr = addr;
+ api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+
+ ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
+ in_size, &csr_val, 8);
+ if (ret) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Read 64 bit csr fail, dest %d addr 0x%x\n",
+ dest, addr);
+ return ret;
+ }
+
+ *val = csr_val;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_rd64);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
new file mode 100644
index 000000000000..b837dab4100e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
@@ -0,0 +1,2728 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __SML_TABLE_H__
+#define __SML_TABLE_H__
+
+#include "hinic_sml_table_pub.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif /* __cplusplus */
+
+#define TBL_ID_CTR_DFX_S32_SM_NODE 11
+#define TBL_ID_CTR_DFX_S32_SM_INST 20
+
+#define TBL_ID_CTR_DFX_PAIR_SM_NODE 10
+#define TBL_ID_CTR_DFX_PAIR_SM_INST 24
+
+#define TBL_ID_CTR_DFX_S64_SM_NODE 11
+#define TBL_ID_CTR_DFX_S64_SM_INST 21
+
+#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)) && \
+ (!defined(__FPGA__)))
+
+#define TBL_ID_GLOBAL_SM_NODE 10
+#define TBL_ID_GLOBAL_SM_INST 1
+
+#define TBL_ID_PORT_CFG_SM_NODE 10
+#define TBL_ID_PORT_CFG_SM_INST 2
+
+#define TBL_ID_VLAN_SM_NODE 10
+#define TBL_ID_VLAN_SM_INST 3
+
+#define TBL_ID_MULTICAST_SM_NODE 10
+#define TBL_ID_MULTICAST_SM_INST 4
+
+#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
+#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
+
+#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
+#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
+
+#define TBL_ID_CAR_SM_NODE 10
+#define TBL_ID_CAR_SM_INST 7
+
+#define TBL_ID_IPMAC_FILTER_SM_NODE 10
+#define TBL_ID_IPMAC_FILTER_SM_INST 8
+
+#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
+#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
+
+#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
+#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
+
+#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
+#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
+
+#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
+#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
+
+#define TBL_ID_MAC_SM_NODE 10
+#define TBL_ID_MAC_SM_INST 21
+
+#define TBL_ID_MAC_BHEAP_SM_NODE 10
+#define TBL_ID_MAC_BHEAP_SM_INST 22
+
+#define TBL_ID_MAC_MISC_SM_NODE 10
+#define TBL_ID_MAC_MISC_SM_INST 23
+
+#define TBL_ID_FUNC_CFG_SM_NODE 11
+#define TBL_ID_FUNC_CFG_SM_INST 1
+
+#define TBL_ID_TRUNK_FWD_SM_NODE 11
+#define TBL_ID_TRUNK_FWD_SM_INST 2
+
+#define TBL_ID_VLAN_FILTER_SM_NODE 11
+#define TBL_ID_VLAN_FILTER_SM_INST 3
+
+#define TBL_ID_ELB_SM_NODE 11
+#define TBL_ID_ELB_SM_INST 4
+
+#define TBL_ID_MISC_RSS_HASH1_SM_NODE 11
+#define TBL_ID_MISC_RSS_HASH1_SM_INST 5
+
+#define TBL_ID_RSS_CONTEXT_SM_NODE 11
+#define TBL_ID_RSS_CONTEXT_SM_INST 6
+
+#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 11
+#define TBL_ID_ETHERTYPE_FILTER_SM_INST 7
+
+#define TBL_ID_VTEP_IP_SM_NODE 11
+#define TBL_ID_VTEP_IP_SM_INST 8
+
+#define TBL_ID_NAT_SM_NODE 11
+#define TBL_ID_NAT_SM_INST 9
+
+#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 11
+#define TBL_ID_BHEAP_LRO_AGING_SM_INST 10
+
+#define TBL_ID_MISC_LRO_AGING_SM_NODE 11
+#define TBL_ID_MISC_LRO_AGING_SM_INST 11
+
+#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 11
+#define TBL_ID_BHEAP_CQE_AGING_SM_INST 12
+
+#define TBL_ID_MISC_CQE_AGING_SM_NODE 11
+#define TBL_ID_MISC_CQE_AGING_SM_INST 13
+
+#define TBL_ID_DFX_LOG_POINTER_SM_NODE 11
+#define TBL_ID_DFX_LOG_POINTER_SM_INST 14
+
+#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 11
+#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 15
+
+#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 11
+#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 16
+
+#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 11
+#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 17
+
+#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 11
+#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 41
+
+#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 11
+#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 42
+
+#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 11
+#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 43
+
+#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 11
+#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 44
+
+#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 11
+#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 45
+
+#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 11
+#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 46
+
+#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 11
+#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 47
+
+#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 11
+#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 48
+
+#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 11
+#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 49
+
+#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 11
+#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 50
+
+#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 11
+#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 51
+
+#define TBL_ID_RWLOCK_ROCE_SM_NODE 11
+#define TBL_ID_RWLOCK_ROCE_SM_INST 30
+
+#define TBL_ID_CQE_ADDR_SM_NODE 11
+#define TBL_ID_CQE_ADDR_SM_INST 31
+
+#else
+
+#define TBL_ID_GLOBAL_SM_NODE 10
+#define TBL_ID_GLOBAL_SM_INST 1
+
+#define TBL_ID_PORT_CFG_SM_NODE 10
+#define TBL_ID_PORT_CFG_SM_INST 2
+
+#define TBL_ID_VLAN_SM_NODE 10
+#define TBL_ID_VLAN_SM_INST 3
+
+#define TBL_ID_MULTICAST_SM_NODE 10
+#define TBL_ID_MULTICAST_SM_INST 4
+
+#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
+#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
+
+#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
+#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
+
+#define TBL_ID_CAR_SM_NODE 10
+#define TBL_ID_CAR_SM_INST 7
+
+#define TBL_ID_IPMAC_FILTER_SM_NODE 10
+#define TBL_ID_IPMAC_FILTER_SM_INST 8
+
+#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
+#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
+
+#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
+#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
+
+#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
+#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
+
+#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
+#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
+
+#define TBL_ID_MAC_SM_NODE 10
+#define TBL_ID_MAC_SM_INST 13
+
+#define TBL_ID_MAC_BHEAP_SM_NODE 10
+#define TBL_ID_MAC_BHEAP_SM_INST 14
+
+#define TBL_ID_MAC_MISC_SM_NODE 10
+#define TBL_ID_MAC_MISC_SM_INST 15
+
+#define TBL_ID_FUNC_CFG_SM_NODE 10
+#define TBL_ID_FUNC_CFG_SM_INST 16
+
+#define TBL_ID_TRUNK_FWD_SM_NODE 10
+#define TBL_ID_TRUNK_FWD_SM_INST 17
+
+#define TBL_ID_VLAN_FILTER_SM_NODE 10
+#define TBL_ID_VLAN_FILTER_SM_INST 18
+
+#define TBL_ID_ELB_SM_NODE 10
+#define TBL_ID_ELB_SM_INST 19
+
+#define TBL_ID_MISC_RSS_HASH1_SM_NODE 10
+#define TBL_ID_MISC_RSS_HASH1_SM_INST 20
+
+#define TBL_ID_RSS_CONTEXT_SM_NODE 10
+#define TBL_ID_RSS_CONTEXT_SM_INST 21
+
+#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 10
+#define TBL_ID_ETHERTYPE_FILTER_SM_INST 22
+
+#define TBL_ID_VTEP_IP_SM_NODE 10
+#define TBL_ID_VTEP_IP_SM_INST 23
+
+#define TBL_ID_NAT_SM_NODE 10
+#define TBL_ID_NAT_SM_INST 24
+
+#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 10
+#define TBL_ID_BHEAP_LRO_AGING_SM_INST 25
+
+#define TBL_ID_MISC_LRO_AGING_SM_NODE 10
+#define TBL_ID_MISC_LRO_AGING_SM_INST 26
+
+#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 10
+#define TBL_ID_BHEAP_CQE_AGING_SM_INST 27
+
+#define TBL_ID_MISC_CQE_AGING_SM_NODE 10
+#define TBL_ID_MISC_CQE_AGING_SM_INST 28
+
+#define TBL_ID_DFX_LOG_POINTER_SM_NODE 10
+#define TBL_ID_DFX_LOG_POINTER_SM_INST 29
+
+#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 10
+#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 40
+
+#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 10
+#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 41
+
+#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 10
+#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 42
+
+#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 10
+#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 43
+
+#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 10
+#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 44
+
+#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 10
+#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 45
+
+#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 10
+#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 46
+
+#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 10
+#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 47
+
+#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 10
+#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 48
+
+#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 10
+#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 49
+
+#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 10
+#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 50
+
+#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 10
+#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 51
+
+#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 10
+#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 52
+
+#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 10
+#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 53
+
+#define TBL_ID_RWLOCK_ROCE_SM_NODE 10
+#define TBL_ID_RWLOCK_ROCE_SM_INST 30
+
+#define TBL_ID_CQE_ADDR_SM_NODE 10
+#define TBL_ID_CQE_ADDR_SM_INST 31
+
+#endif
+
+#define TBL_ID_MISC_RSS_HASH_SM_NODE TBL_ID_MISC_RSS_HASH0_SM_NODE
+#define TBL_ID_MISC_RSS_HASH_SM_INST TBL_ID_MISC_RSS_HASH0_SM_INST
+
+/* rx cqe checksum err */
+#define NIC_RX_CSUM_IP_CSUM_ERR BIT(0)
+#define NIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
+#define NIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
+#define NIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
+#define NIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
+#define NIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
+#define NIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
+#define NIC_RX_CSUM_HW_BYPASS_ERR BIT(7)
+
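+/* Example (illustrative): folding these bits into one decision;
+ * HW_BYPASS means the hardware did not run the check at all:
+ *
+ * bool hw_checked = !(csum_err & NIC_RX_CSUM_HW_BYPASS_ERR);
+ * bool bad_csum = csum_err & (NIC_RX_CSUM_IP_CSUM_ERR |
+ *                             NIC_RX_CSUM_TCP_CSUM_ERR |
+ *                             NIC_RX_CSUM_UDP_CSUM_ERR);
+ */
+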
+typedef struct tag_log_ctrl {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 mod_name:4;
+ u32 log_level:4;
+ u32 rsvd:8;
+ u32 line_num:16;
+#else
+ u32 line_num:16;
+ u32 rsvd:8;
+ u32 log_level:4;
+ u32 mod_name:4;
+#endif
+} log_ctrl;
+
+/**
+ * 1. Bank GPA addresses are HOST-based: every host has 4 bank GPAs,
+ * 32B per host, 4 * 32B in total.
+ * 2. Allocated storage:
+ * two global table entries, index 5 and index 6, are allocated for
+ * storing the bank GPAs (index numbering starts at 0).
+ * The top 32B of index 5 store the bank GPAs of host 0; the remaining
+ * 32B store those of host 1. The top 32B of index 6 store the bank
+ * GPAs of host 2; the remaining 32B store those of host 3.
+ * The bank GPAs of each host use the following format:
+ */
+typedef struct tag_sml_global_bank_gpa {
+ u32 bank0_gpa_h32;
+ u32 bank0_gpa_l32;
+
+ u32 bank1_gpa_h32;
+ u32 bank1_gpa_l32;
+
+ u32 bank2_gpa_h32;
+ u32 bank2_gpa_l32;
+
+ u32 bank3_gpa_h32;
+ u32 bank3_gpa_l32;
+} global_bank_gpa_s;
+
+/**
+ * Struct name: sml_global_table_s
+ * @brief: global_table structure
+ * Description: global configuration table
+ */
+typedef struct tag_sml_global_table {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 port_mode:1; /*portmode:0-eth;1-fic */
+ /* dualplaneenable:0-disable;1-enable */
+ u32 dual_plane_en:1;
+ /* fourrouteenable:0-disable;1-enable */
+ u32 four_route_en:1;
+ /* ficworkmode:0-fabric;1-fullmesh.*/
+ u32 fic_work_mode:1;
+ /* unicast/multicastmode:0-drop;
+ * 1-broadcastinvlandomain
+ */
+ u32 un_mc_mode:1;
+ /* maclearnenable:1-enable */
+ u32 mac_learn_en:1;
+ u32 qcn_en:1;
+ u32 esl_run_flag:1;
+ /* 1-special protocol pkt to up; 0-to x86 */
+ u32 special_pro_to_up_flag:1;
+ u32 vf_mask:4;
+ u32 dif_ser_type:2;
+ u32 rsvd0:1;
+ u32 board_num:16; /*boardnumber */
+#else
+ u32 board_num:16; /*boardnumber */
+ u32 rsvd0:1;
+ u32 dif_ser_type:2;
+ u32 vf_mask:4;
+ /* 1-special protocol pkt to up; 0-to x86 */
+ u32 special_pro_to_up_flag:1;
+ u32 esl_run_flag:1;
+ u32 qcn_en:1;
+ u32 mac_learn_en:1; /*maclearnenable:1-enable */
+ /*unicast/multicastmode:0-drop;1-broadcastinvlandomain*/
+ u32 un_mc_mode:1;
+ /* ficworkmode:0-fabric;1-fullmesh.*/
+ u32 fic_work_mode:1;
+ /*fourrouteenable:0-disable;1-enable */
+ u32 four_route_en:1;
+ /*dualplaneenable:0-disable;1-enable */
+ u32 dual_plane_en:1;
+ u32 port_mode:1; /*portmode:0-eth;1-fic */
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 bc_offset:16; /*broadcastoffset */
+ u32 mc_offset:16; /*multicastoffset */
+#else
+ u32 mc_offset:16; /*multicastoffset */
+ u32 bc_offset:16; /*broadcastoffset */
+#endif
+ } bs;
+ u32 value;
+ } dw1;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
+ u32 xrc_pl_dec:1;
+ u32 sq_cqn:20;
+ u32 qpc_stg:1;
+ u32 qpc_state_err:1;
+ u32 qpc_wb_flag:1;
+#else
+ u32 qpc_wb_flag:1;
+ u32 qpc_state_err:1;
+ u32 qpc_stg:1;
+ u32 sq_cqn:20;
+ u32 xrc_pl_dec:1;
+ u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
+#endif
+ } bs;
+
+ u32 value;
+ } dw2;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 drop_cause_id:16;
+ u32 pkt_len:16;
+#else
+ u32 pkt_len:16;
+ u32 drop_cause_id:16;
+#endif
+ } bs;
+
+ u32 value;
+ } dw3;
+
+ u8 fcoe_vf_table[12];
+
+ union {
+ struct {
+ /* [31:30]Pipeline number mode. */
+ u32 cfg_mode_pn:2;
+ /* [29:28]initial default fq mode for traffic
+ * from rx side
+ */
+ u32 cfg_mode_init_def_fq:2;
+ /* [27:16]base fqid for initial default fqs
+ * (for packest from rx side only).
+ */
+ u32 cfg_base_init_def_fq:12;
+ /* [15:15]push doorbell as new packet to tile
+ * via command path enable.
+ */
+ u32 cfg_psh_msg_en:1;
+ /* [14:14] 1: enable asc for scanning
+ * active fq; 0: disable.
+ */
+ u32 enable_asc:1;
+ /* [13:13] 1: enable pro for command processing; 0: disable. */
+ u32 enable_pro:1;
+ /* [12:12] 1: ngsf mode; 0: ethernet mode. */
+ u32 cfg_ngsf_mod:1;
+ /* [11:11]Stateful process enable. */
+ u32 enable_stf:1;
+ /* [10:9]initial default fq mode for
+ * traffic from tx side.
+ */
+ u32 cfg_mode_init_def_fq_tx:2;
+ /* [8:0]maximum allocable oeid configuration. */
+ u32 cfg_max_oeid:9;
+ } bs;
+ u32 value;
+ } fq_mode;
+
+ u32 rsvd2[8];
+} sml_global_table_s;
+
+/**
+ * Struct name: sml_fic_config_table_s
+ * @brief: global_table structure
+ * Description: global configuration table
+ */
+typedef struct tag_sml_fic_config_table {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* dual plane enable: 0-disable; 1-enable */
+ u32 dual_plane_en:1;
+ /* four route enable: 0-disable; 1-enable */
+ u32 four_route_en:1;
+ /* fic work mode: 0-fabric; 1-fullmesh */
+ u32 fic_work_mode:1;
+ u32 mac_learn_en:1; /* mac learn enable: 1-enable */
+ u32 rsvd:12;
+ u32 board_num:16; /* board number */
+#else
+ u32 board_num:16; /* board number */
+ u32 rsvd:12;
+ u32 mac_learn_en:1; /* mac learn enable: 1-enable */
+ /* fic work mode: 0-fabric; 1-fullmesh */
+ u32 fic_work_mode:1;
+ /* four route enable: 0-disable; 1-enable */
+ u32 four_route_en:1;
+ /* dual plane enable: 0-disable; 1-enable */
+ u32 dual_plane_en:1;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 bc_offset:16; /* broadcast offset */
+ u32 mc_offset:16; /* multicast offset */
+#else
+ u32 mc_offset:16; /* multicast offset */
+ u32 bc_offset:16; /* broadcast offset */
+#endif
+ } bs;
+ u32 value;
+ } dw1;
+
+ u32 rsvd2[14];
+} sml_fic_config_table_s;
+
+/**
+ * Struct name: sml_ucode_version_info_table_s
+ * @brief: microcode version information structure
+ * Description: global configuration table entry data structure of index 1
+ */
+typedef struct tag_sml_ucode_version_info_table {
+ u32 ucode_version[4];
+ u32 ucode_compile_time[5];
+ u32 rsvd[7];
+} sml_ucode_version_info_table_s;
+
+/**
+ * Struct name: sml_funcfg_tbl_s
+ * @brief: Function Configuration Table
+ * Description: Function Configuration attribute table
+ */
+typedef struct tag_sml_funcfg_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* function valid: 0-invalid; 1-valid */
+ u32 valid:1;
+ /* mac learn enable: 0-disable; 1-enable */
+ u32 learn_en:1;
+ /* lli enable: 0-disable; 1-enable */
+ u32 lli_en:1;
+ /* rss enable: 0-disable; 1-enable */
+ u32 rss_en:1;
+ /* rx vlan offload enable: 0-disable; 1-enable */
+ u32 rxvlan_offload_en:1;
+ /* tso local coalesce enable: 0-disable; 1-enable */
+ u32 tso_local_coalesce:1;
+ u32 rsvd1:1;
+ u32 rsvd2:1;
+ /* qos rx car enable: 0-disable; 1-enable */
+ u32 qos_rx_car_en:1;
+ /* mac filter enable: 0-disable; 1-enable */
+ u32 mac_filter_en:1;
+ /* ipmac filter enable: 0-disable; 1-enable */
+ u32 ipmac_filter_en:1;
+ /* ethtype filter enable: 0-disable; 1-enable */
+ u32 ethtype_filter_en:1;
+ /* mc bc limit enable: 0-disable; 1-enable */
+ u32 mc_bc_limit_en:1;
+ /* acl tx enable: 0-disable; 1-enable */
+ u32 acl_tx_en:1;
+ /* acl rx enable: 0-disable; 1-enable */
+ u32 acl_rx_en:1;
+ /* ovs function enable: 0-disable; 1-enable */
+ u32 ovs_func_en:1;
+ /* ucode capture enable: 0-disable; 1-enable */
+ u32 ucapture_en:1;
+ /* fic car enable: 0-disable; 1-enable */
+ u32 fic_car_en:1;
+ u32 tso_en:1;
+ u32 nic_rx_mode:5; /* nic_rx_mode:
+ * 0b00001: unicast mode
+ * 0b00010: multicast mode
+ * 0b00100: broadcast mode
+ * 0b01000: all multicast mode
+ * 0b10000: promisc mode
+ */
+ u32 rsvd4:3;
+ u32 def_pri:3; /* default priority */
+ /* host id: [0~3]. support up to 4 Host. */
+ u32 host_id:2;
+#else
+ u32 host_id:2;
+ u32 def_pri:3;
+ u32 rsvd4:3;
+ u32 nic_rx_mode:5;
+ u32 tso_en:1;
+ u32 fic_car_en:1;
+ /* ucode capture enable: 0-disable; 1-enable */
+ u32 ucapture_en:1;
+ u32 ovs_func_en:1;
+ u32 acl_rx_en:1;
+ u32 acl_tx_en:1;
+ u32 mc_bc_limit_en:1;
+ u32 ethtype_filter_en:1;
+ u32 ipmac_filter_en:1;
+ u32 mac_filter_en:1;
+ u32 qos_rx_car_en:1;
+ u32 rsvd2:1;
+ u32 rsvd1:1;
+ u32 tso_local_coalesce:1;
+ u32 rxvlan_offload_en:1;
+ u32 rss_en:1;
+ u32 lli_en:1;
+ u32 learn_en:1;
+ u32 valid:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 mtu:16; /* mtu value: [64-15500] */
+ u32 rsvd1:1;
+ /* vlan mode: 0-all; 1-access; 2-trunk;
+ * 3-hybrid (unsupported); 4-qinq port
+ */
+ u32 vlan_mode:3;
+ u32 vlan_id:12; /* vlan id: [0~4095] */
+#else
+ u32 vlan_id:12;
+ u32 vlan_mode:3;
+ u32 rsvd1:1;
+ u32 mtu:16;
+#endif
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 lli_mode:1; /* lli mode */
+ /* er forward trunk type: 0-ethernet type, 1-fic type */
+ u32 er_fwd_trunk_type:1;
+ /* er forward trunk mode:
+ * 0-standby; 1-smac; 2-dmac; 3-smacdmac; 4-sip; 5-dip;
+ * 6-sipdip; 7-5tuples; 8-lacp
+ */
+ u32 er_fwd_trunk_mode:4;
+ /* edge relay mode: 0-VEB; 1-VEPA (unsupported);
+ * 2-Multi-Channel (unsupported)
+ */
+ u32 er_mode:2;
+ /* edge relay id: [0~15]. support up to 16 er. */
+ u32 er_id:4;
+ /* er forward type: 2-port; 3-fic;
+ * 4-trunk; other-unsupported
+ */
+ u32 er_fwd_type:4;
+ /* er forward id:
+ * fwd_type=2: forward ethernet port id
+ * fwd_type=3: forward fic id(tb+tp)
+ * fwd_type=4: forward trunk id
+ */
+ u32 er_fwd_id:16;
+#else
+ u32 er_fwd_id:16;
+ u32 er_fwd_type:4;
+ u32 er_id:4;
+ u32 er_mode:2;
+ u32 er_fwd_trunk_mode:4;
+ u32 er_fwd_trunk_type:1;
+ u32 lli_mode:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw2;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 pfc_en:1;
+ u32 rsvd1:7;
+ u32 ovs_invld_tcp_action:1;
+ u32 ovs_ip_frag_action:1;
+ u32 rsvd2:2;
+ u32 roce_en:1;
+ u32 iwarp_en:1;
+ u32 fcoe_en:1;
+ u32 toe_en:1;
+ u32 rsvd3:8;
+ u32 ethtype_group_id:8;
+#else
+ u32 ethtype_group_id:8;
+ u32 rsvd3:8;
+ u32 toe_en:1;
+ u32 fcoe_en:1;
+ u32 iwarp_en:1;
+ u32 roce_en:1;
+ u32 rsvd2:2;
+ u32 ovs_ip_frag_action:1;
+ u32 ovs_invld_tcp_action:1;
+ u32 rsvd1:7;
+ u32 pfc_en:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw3;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:8;
+ u32 vni:24;
+#else
+ u32 vni:24;
+ u32 rsvd1:8;
+#endif
+ } bs;
+
+ u32 value;
+ } dw4;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1;
+#else
+ u32 rsvd1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw5;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:8;
+ u32 rq_thd:13;
+ u32 host_car_id:11; /* host car id */
+#else
+ u32 host_car_id:11;
+ u32 rq_thd:13;
+ u32 rsvd1:8;
+#endif
+ } bs;
+
+ u32 value;
+ } dw6;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:5;
+ u32 fic_uc_car_id:11; /* fic unicast car id */
+ u32 rsvd2:5;
+ u32 fic_mc_car_id:11; /* fic multicast car id */
+#else
+ u32 fic_mc_car_id:11;
+ u32 rsvd2:5;
+ u32 fic_uc_car_id:11;
+ u32 rsvd1:5;
+#endif
+ } fic_bs;
+
+ u32 value;
+ } dw7;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* safe group identifier valid: 0-invalid; 1-valid */
+ u32 sg_id_valid:1;
+ u32 sg_id:10; /* safe group identifier */
+ u32 rsvd9:1;
+ /* rq priority enable: 0-disable; 1-enable */
+ u32 rq_pri_en:1;
+ /* rq priority num: 0-1pri; 1-2pri; 2-4pri; 3-8pri */
+ u32 rq_pri_num:3;
+ /* one wqe buffer size, default is 2K bytes */
+ u32 rx_wqe_buffer_size:16;
+#else
+ u32 rx_wqe_buffer_size:16;
+ u32 rq_pri_num:3;
+ u32 rq_pri_en:1;
+ u32 rsvd9:1;
+ u32 sg_id:10;
+ u32 sg_id_valid:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw8;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* IPv4 LRO enable: 0-disable; 1-enable; */
+ u32 lro_ipv4_en:1;
+ /* IPv6 LRO enable: 0-disable; 1-enable; */
+ u32 lro_ipv6_en:1;
+ /* LRO pkt max wqe buffer number */
+ u32 lro_max_wqe_num:6;
+ /* Each group occupies 3 bits; the 8 groups
+ * share the 24 bits, and group 0 corresponds
+ * to the low 3 bits
+ */
+ u32 vlan_pri_map_group:24;
+#else
+ u32 vlan_pri_map_group:24;
+ u32 lro_max_wqe_num:6;
+ u32 lro_ipv6_en:1;
+ u32 lro_ipv4_en:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw9;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rss_group_id:4;
+ u32 lli_frame_size:12;
+ u32 smac_h16:16;
+#else
+ u32 smac_h16:16;
+ u32 lli_frame_size:12;
+ u32 rss_group_id:4;
+#endif
+ } bs;
+
+ u32 value;
+ } dw10;
+
+ u32 smac_l32;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 oqid:16;
+ u32 vf_map_pf_id:4;
+ /* lro change: 0-changing; 1-change done */
+ u32 lro_change_flag:1;
+ u32 rsvd11:1;
+ u32 base_qid:10;
+#else
+ u32 base_qid:10;
+ u32 rsvd11:1;
+ u32 lro_change_flag:1;
+ u32 vf_map_pf_id:4;
+ u32 oqid:16;
+#endif
+ } bs;
+
+ u32 value;
+ } dw12;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:2;
+ u32 cfg_rq_depth:6;
+ u32 cfg_q_num:6;
+ u32 fc_port_id:4;
+ u32 rsvd2:14;
+#else
+ u32 rsvd2:14;
+ u32 fc_port_id:4;
+ u32 cfg_q_num:6;
+ u32 cfg_rq_depth:6;
+ u32 rsvd1:2;
+#endif
+ } bs;
+
+ u32 value;
+ } dw13;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1;
+#else
+ u32 rsvd1;
+#endif
+ } bs;
+
+ u32 value;
+
+ } dw14;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd3:2;
+ u32 bond3_hash_policy:3;
+ u32 bond3_mode:3;
+ u32 rsvd2:2;
+ u32 bond2_hash_policy:3;
+ u32 bond2_mode:3;
+ u32 rsvd1:2;
+ u32 bond1_hash_policy:3;
+ u32 bond1_mode:3;
+ u32 rsvd0:2;
+ u32 bond0_hash_policy:3;
+ u32 bond0_mode:3;
+#else
+ u32 bond0_mode:3;
+ u32 bond0_hash_policy:3;
+ u32 rsvd0:2;
+ u32 bond1_mode:3;
+ u32 bond1_hash_policy:3;
+ u32 rsvd1:2;
+ u32 bond2_mode:3;
+ u32 bond2_hash_policy:3;
+ u32 rsvd2:2;
+ u32 bond3_mode:3;
+ u32 bond3_hash_policy:3;
+ u32 rsvd3:2;
+#endif
+ } bs;
+
+ u32 value;
+
+ } dw15;
+} sml_funcfg_tbl_s;
+
+/**
+ * Struct name: sml_portcfg_tbl_s
+ * @brief: Port Configuration Table
+ * Description: Port Configuration attribute table
+ */
+typedef struct tag_sml_portcfg_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 valid:1; /* valid:0-invalid; 1-valid */
+ /* mac learn enable: 0-disable; 1-enable */
+ u32 learn_en:1;
+ u32 trunk_en:1; /* trunk enable: 0-disable; 1-enable */
+ /* broadcast suppression enable: 0-disable; 1-enable */
+ u32 bc_sups_en:1;
+ /* unknown multicast suppression enable:
+ * 0-disable; 1-enable
+ */
+ u32 un_mc_sups_en:1;
+ /* unknown unicast suppression enable:
+ * 0-disable; 1-enable
+ */
+ u32 un_uc_sups_en:1;
+ u32 ovs_mirror_tx_en:1;
+ /* ovs port enable: 0-disable; 1-enable */
+ u32 ovs_port_en:1;
+ u32 ovs_mirror_rx_en:1;
+ u32 qcn_en:1; /* qcn enable: 0-disable; 1-enable */
+ /* ucode capture enable: 0-disable; 1-enable */
+ u32 ucapture_en:1;
+ u32 ovs_invld_tcp_action:1;
+ u32 ovs_ip_frag_action:1;
+ u32 def_pri:3; /* default priority */
+ u32 rsvd3:2;
+ /* edge relay mode: 0-VEB; 1-VEPA (unsupported);
+ * 2-Multi-Channel (unsupported)
+ */
+ u32 er_mode:2;
+ /* edge relay identifier: [0~15]. support up to 16 er */
+ u32 er_id:4;
+ u32 trunk_id:8; /* trunk identifier: [0~255] */
+#else
+ u32 trunk_id:8;
+ u32 er_id:4;
+ u32 er_mode:2;
+ u32 rsvd3:2;
+ u32 def_pri:3;
+ u32 ovs_ip_frag_action:1;
+ u32 ovs_invld_tcp_action:1;
+ u32 ucapture_en:1;
+ u32 qcn_en:1;
+ u32 ovs_mirror_rx_en:1;
+ u32 ovs_port_en:1;
+ u32 ovs_mirror_tx_en:1;
+ u32 un_uc_sups_en:1;
+ u32 un_mc_sups_en:1;
+ u32 bc_sups_en:1;
+ u32 trunk_en:1;
+ u32 learn_en:1;
+ u32 valid:1;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd2:2;
+ u32 mtu:14;
+ u32 rsvd3:1;
+ u32 vlan_mode:3;
+ u32 vlan_id:12;
+#else
+ u32 vlan_id:12;
+ u32 vlan_mode:3;
+ u32 rsvd3:1;
+ u32 mtu:14;
+ u32 rsvd2:2;
+#endif
+ } bs;
+ u32 value;
+ } dw1;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* q7_cos : ... : q0_cos = 4bits : ... : 4bits */
+ u32 ovs_queue_cos;
+#else
+ u32 ovs_queue_cos;
+#endif
+ } bs;
+ u32 value;
+ } dw2;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:10;
+ u32 un_mc_car_id:11;
+ u32 un_uc_car_id:11;
+#else
+ u32 un_uc_car_id:11;
+ u32 un_mc_car_id:11;
+ u32 rsvd1:10;
+#endif
+ } bs;
+ u32 value;
+ } dw3;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd6:5;
+ u32 bc_car_id:11;
+ u32 pf_promiscuous_bitmap:16;
+#else
+ u32 pf_promiscuous_bitmap:16;
+ u32 bc_car_id:11;
+ u32 rsvd6:5;
+#endif
+ } bs;
+ u32 value;
+ } dw4;
+
+ union {
+ struct {
+ u32 fc_map;
+
+ } fcoe_bs;
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 start_queue:8;
+ u32 queue_size:8;
+ u32 mirror_func_id:16;
+#else
+ u32 mirror_func_id:16;
+ u32 queue_size:8;
+ u32 start_queue:8;
+#endif
+ } ovs_mirror_bs;
+ u32 value;
+ } dw5;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u16 vlan;
+ u16 dmac_h16;
+#else
+ u16 dmac_h16;
+ u16 vlan;
+#endif
+ } fcoe_bs;
+ u32 value;
+ } dw6;
+
+ union {
+ struct {
+ u32 dmac_l32;
+
+ } fcoe_bs;
+ u32 value;
+ } dw7;
+
+} sml_portcfg_tbl_s;
+
+/**
+ * Struct name: sml_taggedlist_tbl_s
+ * @brief: Tagged List Table
+ * Description: VLAN filtering Trunk/Hybrid type tagged list table
+ */
+typedef struct tag_sml_taggedlist_tbl {
+ u32 bitmap[TBL_ID_TAGGEDLIST_BITMAP32_NUM];
+} sml_taggedlist_tbl_s;
+
+/**
+ * Struct name: sml_untaggedlist_tbl_s
+ * @brief: Untagged List Table
+ * Description: VLAN filtering Hybrid type Untagged list table
+ */
+typedef struct tag_sml_untaggedlist_tbl {
+ u32 bitmap[TBL_ID_UNTAGGEDLIST_BITMAP32_NUM];
+} sml_untaggedlist_tbl_s;
+
+/**
+ * Struct name: sml_trunkfwd_tbl_s
+ * @brief: Trunk Forward Table
+ * Description: port aggregation Eth-Trunk forwarding table
+ */
+typedef struct tag_sml_trunkfwd_tbl {
+ u16 fwd_id[TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM]; /* dw0-dw15 */
+} sml_trunkfwd_tbl_s;
+
+/**
+ * Struct name: sml_mac_tbl_head_u
+ * @brief: Mac table request/response head
+ * Description: MAC table, Hash API header
+ */
+typedef union tag_sml_mac_tbl_head {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 src:5;
+ u32 instance_id:6;
+ u32 opid:5;
+ u32 A:1;
+ u32 S:1;
+ u32 rsvd:14;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 rsvd:14;
+ u32 S:1;
+ u32 A:1;
+ u32 opid:5;
+ u32 instance_id:6;
+ u32 src:5;
+#endif
+ } req_bs;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 code:2;
+ u32 subcode:2;
+ u32 node_index:28;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 node_index:28;
+ u32 subcode:2;
+ u32 code:2;
+#endif
+ } rsp_bs;
+
+ u32 value;
+} sml_mac_tbl_head_u;
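+/* Usage note (illustrative): the same 4-byte head is filled in as a
+ * request (req_bs: src, instance_id, opid) before the hash API call and
+ * is overwritten as a response (rsp_bs: code, subcode, node_index) when
+ * the call returns.
+ */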
+
+/**
+ * Struct name: sml_mac_tbl_8_4_key_u
+ * @brief: Mac Table Key
+ * Description: MAC table key
+ */
+typedef union tag_sml_mac_tbl_8_4_key {
+ struct {
+ u32 val0;
+ u32 val1;
+ } value;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 er_id:4;
+ u32 vlan_id:12;
+ u32 mac_h16:16;
+
+ u32 mac_m16:16;
+ u32 mac_l16:16;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 mac_h16:16;
+ u32 vlan_id:12;
+ u32 er_id:4;
+
+ u32 mac_l16:16;
+ u32 mac_m16:16;
+#endif
+ } bs;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 er_id:4;
+ u32 vlan_id:12;
+ u32 mac0:8;
+ u32 mac1:8;
+
+ u32 mac2:8;
+ u32 mac3:8;
+ u32 mac4:8;
+ u32 mac5:8;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 mac1:8;
+ u32 mac0:8;
+ u32 vlan_id:12;
+ u32 er_id:4;
+
+ u32 mac5:8;
+ u32 mac4:8;
+ u32 mac3:8;
+ u32 mac2:8;
+#endif
+ } mac_bs;
+} sml_mac_tbl_8_4_key_u;
+
+/**
+ * Struct name: sml_mac_tbl_8_4_item_u
+ * @brief: Mac Table Item
+ * Description: MAC table item (forwarding type and id)
+ */
+typedef union tag_sml_mac_tbl_8_4_item {
+ u32 value;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd:10;
+ u32 host_id:2;
+ u32 fwd_type:4;
+ u32 fwd_id:16;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 fwd_id:16;
+ u32 fwd_type:4;
+ u32 host_id:2;
+ u32 rsvd:10;
+#endif
+ } bs;
+} sml_mac_tbl_8_4_item_u;
+
+/**
+ * Struct name: sml_mac_tbl_key_item_s
+ * @brief: Mac Table (8 + 4)
+ * Description: MAC table Key + Item
+ */
+typedef struct tag_sml_mac_tbl_8_4 {
+ sml_mac_tbl_head_u head;
+ sml_mac_tbl_8_4_key_u key;
+ sml_mac_tbl_8_4_item_u item;
+} sml_mac_tbl_8_4_s;
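+/* Illustrative key packing (values are examples, not part of the ABI):
+ * for er_id 1, vlan 10 and MAC 00:11:22:33:44:55, the bs layout holds
+ * er_id = 1, vlan_id = 10, mac_h16 = 0x0011, mac_m16 = 0x2233 and
+ * mac_l16 = 0x4455; on a hit the item supplies fwd_type/fwd_id.
+ */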
+
+/**
+ * Struct name: sml_vtep_tbl_8_20_key_s
+ * @brief: Vtep Table Key
+ * Description: VTEP table key (remote VTEP IP address)
+ */
+typedef struct tag_sml_vtep_tbl_8_20_key {
+ u32 vtep_remote_ip;
+ u32 rsvd;
+} sml_vtep_tbl_8_20_key_s;
+
+/**
+ * Struct name: dmac_smac_u
+ * @brief: Dmac & Smac for VxLAN encapsulation
+ * Description: outer destination and source MACs for VxLAN encapsulation
+ */
+typedef union tag_dmac_smac {
+ u16 mac_addr[6];
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u16 d_mac0:8;
+ u16 d_mac1:8;
+ u16 d_mac2:8;
+ u16 d_mac3:8;
+
+ u16 d_mac4:8;
+ u16 d_mac5:8;
+ u16 s_mac0:8;
+ u16 s_mac1:8;
+
+ u16 s_mac2:8;
+ u16 s_mac3:8;
+ u16 s_mac4:8;
+ u16 s_mac5:8;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u16 d_mac1:8;
+ u16 d_mac0:8;
+ u16 d_mac3:8;
+ u16 d_mac2:8;
+
+ u16 d_mac5:8;
+ u16 d_mac4:8;
+ u16 s_mac1:8;
+ u16 s_mac0:8;
+
+ u16 s_mac3:8;
+ u16 s_mac2:8;
+ u16 s_mac5:8;
+ u16 s_mac4:8;
+#endif
+ } bs;
+} dmac_smac_u;
+
+/**
+ * Struct name: sml_vtep_tbl_8_20_item_s
+ * @brief: Vtep Table Item
+ * Description: VTEP table item (encapsulation MACs, source IP, vlan)
+ */
+typedef struct tag_sml_vtep_tbl_8_20_item {
+ dmac_smac_u dmac_smac;
+ u32 source_ip;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 er_id:4;
+ u32 rsvd:12;
+ u32 vlan:16; /* vlan, including the PRI */
+#else
+ u32 vlan:16; /* vlan, including the PRI */
+ u32 rsvd:12;
+ u32 er_id:4;
+#endif
+ } bs;
+
+ u32 value;
+ } misc;
+} sml_vtep_tbl_8_20_item_s;
+
+/**
+ * Struct name: sml_vtep_tbl_8_20_s
+ * @brief: Vtep Table (8 + 20)
+ * Description: VTEP table (head + key + item)
+ */
+typedef struct tag_sml_vtep_tbl_8_20 {
+ sml_mac_tbl_head_u head; /* first 4 bytes, the same as mac tbl */
+ sml_vtep_tbl_8_20_key_s key;
+ sml_vtep_tbl_8_20_item_s item;
+} sml_vtep_tbl_8_20_s;
+
+/**
+ * Struct name: sml_vxlan_udp_portcfg_4_8_key_s
+ * @brief: Vxlan Dest Udp Port Table Key
+ * Description: VxLAN destination UDP port table key
+ */
+typedef struct tag_sml_vxlan_udp_portcfg_4_8_key {
+ u32 udp_dest_port;
+ u32 rsvd;
+} sml_vxlan_udp_portcfg_4_8_key_s;
+
+/**
+ * Struct name: sml_vxlan_udp_portcfg_4_8_item_s
+ * @brief: Vxlan Dest Udp Port Table Item
+ * Description: VxLAN destination UDP port table item
+ */
+typedef struct tag_sml_vxlan_udp_portcfg_4_8_item {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 odp_port:12;
+ u32 dp_id:2;
+ u32 resvd:20;
+#else
+ u32 resvd:20;
+ u32 dp_id:2;
+ u32 odp_port:12;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+} sml_vxlan_udp_portcfg_4_8_item_s;
+
+/**
+ * Struct name: sml_vxlan_udp_portcfg_4_8_s
+ * @brief: Vxlan Dest Udp Port Table (4 + 8)
+ * Description: VxLAN destination UDP port table (head + key + item)
+ */
+typedef struct tag_sml_vxlan_udp_portcfg_4_8 {
+ sml_mac_tbl_head_u head; /* first 4 bytes, the same as mac tbl */
+ sml_vxlan_udp_portcfg_4_8_key_s key;
+ sml_vxlan_udp_portcfg_4_8_item_s item;
+} sml_vxlan_udp_portcfg_4_8_s;
+
+/**
+ * Struct name: sml_vtep_er_info_s
+ * @brief: Vtep Er Info Table
+ * Description: VTEP edge relay information table
+ */
+typedef struct tag_sml_vtep_er_info {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 lli_mode:1;
+ /* ER bound to the outbound port is Eth-Trunk,
+ * type (FIC/Port)
+ */
+ u32 er_fwd_trunk_type:1;
+ /* ER bound to the outbound port is Eth-Trunk,
+ * port aggregation mode (Standby/LoadBalance/LACP)
+ */
+ u32 er_fwd_trunk_mode:4;
+ u32 er_mode:2; /* ER mode (VEB/VEPA)*/
+ /* er_id is the LT index and is also kept in
+ * the entry for convenience
+ */
+ u32 er_id:4;
+ /* Type of the ER bound to the outbound port
+ * (Port/FIC/Eth-Trunk)
+ */
+ u32 er_fwd_type:4;
+ /* ER bound egress ID(PortID/FICID/TrunkID)*/
+ u32 er_fwd_id:16;
+#else
+ u32 er_fwd_id:16;
+ u32 er_fwd_type:4;
+ u32 er_id:4;
+ u32 er_mode:2;
+ u32 er_fwd_trunk_mode:4;
+ u32 er_fwd_trunk_type:1;
+ u32 lli_mode:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+} sml_vtep_er_info_s;
+
+/**
+ * Struct name: sml_logic_port_cfg_tbl_s
+ * @brief: Logic Port Cfg Table
+ * Description: logical port configuration table
+ */
+typedef struct tag_sm_logic_port_cfg {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* Input switch port (or DP_MAX_PORTS). */
+ u32 odp_port:12;
+ u32 dp_id:2; /* datapath id */
+ u32 er_id:4;
+ /* logic port MAC Learning enable or disable */
+ u32 learn_en:1;
+ u32 resvd:13;
+#else
+ u32 resvd:13;
+ /* logic port MAC Learning enable or disable */
+ u32 learn_en:1;
+ u32 er_id:4;
+ u32 dp_id:2; /* datapath id */
+ /* Input switch port (or DP_MAX_PORTS). */
+ u32 odp_port:12;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd4:1;
+ u32 er_fwd_trunk_type:1;
+ u32 er_fwd_trunk_mode:4;
+ u32 er_mode:2;
+ u32 er_id:4;
+ u32 er_fwd_type:4;
+ u32 er_fwd_id:16;
+#else
+ u32 er_fwd_id:16;
+ u32 er_fwd_type:4;
+ u32 er_id:4;
+ u32 er_mode:2;
+ u32 er_fwd_trunk_mode:4;
+ u32 er_fwd_trunk_type:1;
+ u32 rsvd4:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw1;
+} sml_logic_port_cfg_tbl_s;
+
+/* vport stats counter */
+typedef struct tag_vport_stats_ctr {
+ u16 rx_packets; /* total packets received */
+ u16 tx_packets; /* total packets transmitted */
+ u16 rx_bytes; /* total bytes received */
+ u16 tx_bytes; /* total bytes transmitted */
+ u16 rx_errors; /* bad packets received */
+ u16 tx_errors; /* packet transmit problems */
+ u16 rx_dropped; /* no space in linux buffers */
+ u16 tx_dropped; /* no space available in linux */
+} vport_stats_ctr_s;
+
+/**
+ * Struct name: vport_s
+ * @brief: Datapath Cfg Table
+ * Description: datapath vport configuration table
+ */
+typedef struct tag_vport {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* dw0 */
+ u32 valid:1;
+ u32 learn_en:1;
+ u32 type:4;
+ u32 dp_id:2;
+ /* The type of Vport mapping port, 0:VF, 1:Logic Port */
+ u32 mapping_type:4;
+ u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
+ u32 rsvd:8;
+
+ /* dw1 */
+ u32 srctagl:12; /* the function used by parent context */
+ /* parent context XID used to upcall missed packet to ovs-vswitchd */
+ u32 xid:20;
+
+ /* dw2 */
+ u32 odp_port:12; /* on datapath port id */
+ /* parent context CID used to upcall missed packet to ovs-vswitchd */
+ u32 cid:20;
+#else
+ /* dw0 */
+ u32 rsvd:8;
+ u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
+ /* The type of Vport mapping port, 0:VF, 1:Logic Port */
+ u32 mapping_type:4;
+ u32 dp_id:2;
+ u32 type:4;
+ u32 learn_en:1;
+ u32 valid:1;
+
+ /* dw1 */
+ /* parent context XID used to upcall missed packet to ovs-vswitchd */
+ u32 xid:20;
+ u32 srctagl:12; /* the function used by parent context */
+
+ /* dw2 */
+ /* parent context CID used to upcall missed packet to ovs-vswitchd */
+ u32 cid:20;
+ u32 odp_port:12; /* on datapath port id */
+#endif
+
+ /* dw3 is er information and it is valid only
+ * when mapping_type=1(logic port)
+ */
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 lli_mode:1;
+ /* ER bound to the outbound port is Eth-Trunk,
+ * type (FIC/Port)
+ */
+ u32 er_fwd_trunk_type:1;
+ /* ER bound to the outbound port is Eth-Trunk,
+ * port aggregation mode (Standby/LoadBalance/LACP)
+ */
+ u32 er_fwd_trunk_mode:4;
+ u32 er_mode:2; /* ER mode (VEB/VEPA)*/
+ u32 er_id:4; /* ERID */
+ /* Type of the ER bound to the outbound port
+ * (Port/FIC/Eth-Trunk)
+ */
+ u32 er_fwd_type:4;
+ /*ER bound egress ID(PortID/FICID/TrunkID)*/
+ u32 er_fwd_id:16;
+#else
+ u32 er_fwd_id:16;
+ u32 er_fwd_type:4;
+ u32 er_id:4;
+ u32 er_mode:2;
+ u32 er_fwd_trunk_mode:4;
+ /* ER bound to the outbound port is Eth-Trunk,
+ * type (FIC/Port)
+ */
+ u32 er_fwd_trunk_type:1;
+ u32 lli_mode:1;
+#endif
+ } bs;
+ u32 value;
+ } dw3;
+
+ /* dw4~dw7 */
+ vport_stats_ctr_s stats; /* vport stats counters */
+
+} vport_s;
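+/* Upcall sketch (illustrative): on a datapath miss, the microcode uses
+ * this vport's xid/cid (the parent context) to deliver the packet to
+ * ovs-vswitchd; dw3 is consulted only when mapping_type is 1 (logic
+ * port) to resolve the edge relay egress.
+ */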
+
+/**
+ * Struct name: sml_elb_tbl_elem_u
+ * @brief: ELB Table Elem
+ * Description: ELB leaf table members
+ */
+typedef union tag_sml_elb_tbl_elem {
+ struct {
+ u32 fwd_val;
+ u32 next_val;
+ } value;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd0:12;
+ u32 fwd_type:4;
+ u32 fwd_id:16;
+
+ u32 rsvd1:17;
+ u32 elb_index_next:15;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 fwd_id:16;
+ u32 fwd_type:4;
+ u32 rsvd0:12;
+
+ u32 elb_index_next:15;
+ u32 rsvd1:17;
+#endif
+ } bs;
+} sml_elb_tbl_elem_u;
+
+/**
+ * Struct name: sml_elb_tbl_s
+ * @brief ELB Table
+ * Description: ELB leaf table Entry
+ */
+typedef struct tag_sml_elb_tbl {
+ sml_elb_tbl_elem_u elem[TBL_ID_ELB_ENTRY_ELEM_NUM];
+} sml_elb_tbl_s;
+
+/**
+ * Struct name: sml_vlan_tbl_elem_u
+ * @brief: VLAN Table Elem
+ * Description: VLAN broadcast table members
+ */
+typedef union tag_sml_vlan_tbl_elem {
+ u16 value;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u16 learn_en:1;
+ u16 elb_index:15;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u16 elb_index:15;
+ u16 learn_en:1;
+#endif
+ } bs;
+} sml_vlan_tbl_elem_u;
+
+/**
+ * Struct name: sml_vlan_tbl_s
+ * @brief: VLAN Table
+ * Description: VLAN broadcast table entry
+ */
+typedef struct tag_sml_vlan_tbl {
+ sml_vlan_tbl_elem_u elem[TBL_ID_VLAN_ENTRY_ELEM_NUM];
+} sml_vlan_tbl_s;
+
+/**
+ * Struct name: sml_multicast_tbl_array_u
+ * @brief: Multicast Table Elem
+ * Description: multicast table members
+ */
+typedef union tag_sml_multicast_tbl_elem {
+ struct {
+ u32 route_val;
+ u32 next_val;
+ } value;
+
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd0:12;
+ u32 route_fwd_type:4;
+ u32 route_fwd_id:16;
+
+ u32 rsvd1:17;
+ u32 elb_index:15;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 route_fwd_id:16;
+ u32 route_fwd_type:4;
+ u32 rsvd0:12;
+
+ u32 elb_index:15;
+ u32 rsvd1:17;
+#endif
+ } bs;
+} sml_multicast_tbl_elem_u;
+
+/* Struct name: sml_multicast_tbl_s
+ * @brief: Multicast Table
+ * Description: multicast table entry
+ */
+typedef struct tag_sml_multicast_tbl {
+ sml_multicast_tbl_elem_u elem[TBL_ID_MULTICAST_ENTRY_ELEM_NUM];
+} sml_multicast_tbl_s;
+
+/* Struct name: sml_observe_port_s
+ * @brief: Observe Port Table
+ * Description: observing port entries defined
+ */
+typedef struct tag_sml_observe_port {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 valid:1;
+ u32 rsvd0:11;
+ u32 dst_type:4;
+ u32 dst_id:16;
+#else
+ u32 dst_id:16;
+ u32 dst_type:4;
+ u32 rsvd0:11;
+ u32 valid:1;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:4;
+ u32 vlan_id:12;
+ u32 rsvd2:2;
+ u32 cut_len:14;
+#else
+ u32 cut_len:14;
+ u32 rsvd2:2;
+ u32 vlan_id:12;
+ u32 rsvd1:4;
+#endif
+ } bs;
+ u32 value;
+ } dw1;
+
+ u32 rsvd_pad[2];
+} sml_observe_port_s;
+
+/* Struct name: sml_ipmac_tbl_16_12_key_s
+ * @brief ipmac filter table key
+ * Description: ipmac filter key define
+ */
+typedef struct tag_sml_ipmac_tbl_16_12_key {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 func_id:16;
+ u32 mac_h16:16;
+#else
+ u32 mac_h16:16;
+ u32 func_id:16;
+#endif
+
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 mac_m16:16;
+ u32 mac_l16:16;
+#else
+ u32 mac_l16:16;
+ u32 mac_m16:16;
+#endif
+
+ u32 ip;
+ u32 rsvd;
+} sml_ipmac_tbl_16_12_key_s;
+
+/* Struct name: sml_ipmac_tbl_16_12_item_s
+ * @brief ipmac filter table item
+ * Description: ipmac filter item define
+ */
+typedef struct tag_sml_ipmac_tbl_16_12_item {
+ u32 rsvd[3];
+} sml_ipmac_tbl_16_12_item_s;
+
+/* Struct name: sml_ethtype_tbl_8_4_key_s
+ * @brief: ethtype filter table key
+ * Description: ethtype filter key define
+ */
+typedef struct tag_sml_ethtype_tbl_8_4_key {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 group_id:16;
+ u32 ethtype:16;
+#else
+ u32 ethtype:16;
+ u32 group_id:16;
+#endif
+
+ u32 rsvd;
+} sml_ethtype_tbl_8_4_key_s;
+
+/* Struct name: sml_ethtype_tbl_8_4_item_s
+ * @brief ethtype filter table item
+ * Description: ethtype filter item define
+ */
+typedef struct tag_sml_ethtype_tbl_8_4_item {
+ u32 rsvd;
+} sml_ethtype_tbl_8_4_item_s;
+
+/* ACL packet direction recorded for DFX */
+typedef enum {
+ ACL_PKT_TX = 0,
+ ACL_PKT_RX = 1,
+} sml_acl_pkt_dir_e;
+
+/* ACL policy table item*/
+typedef struct tag_sml_acl_policy_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 drop:1;
+ u32 car_en:1;
+ u32 car_id:12;
+ u32 counter_type:2;
+ u32 counter_id:16;
+#else
+ u32 counter_id:16;
+ u32 counter_type:2;
+ u32 car_id:12;
+ u32 car_en:1;
+ u32 drop:1;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd1:7;
+ u32 mirrior_en:1;
+ u32 observer_port:10;
+ u32 change_dscp:1;
+ u32 new_dscp:6;
+ u32 change_pkt_pri:1;
+ u32 new_pkt_pri:3;
+ u32 redirect_en:3;
+#else
+ u32 redirect_en:3;
+ u32 new_pkt_pri:3;
+ u32 change_pkt_pri:1;
+ u32 new_dscp:6;
+ u32 change_dscp:1;
+ u32 observer_port:10;
+ u32 mirrior_en:1;
+ u32 rsvd1:7;
+#endif
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 redirect_data;
+ u32 rsvd2;
+} sml_acl_policy_tbl_s;
+
+typedef struct tag_sml_acl_ipv4_key {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* for alignment; match_key_type and the
+ * following fields are the KEY value
+ */
+ u32 padding:16;
+ u32 tid0:2;
+ u32 match_key_type:3; /* matching type */
+ u32 rsvd:11; /* reserved field */
+#else
+ u32 rsvd:11;
+ u32 match_key_type:3;
+ u32 tid0:2;
+ u32 padding:16;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ /* dw1&dw2 */
+ u32 sipv4;
+ u32 dipv4;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 l4_sport:16;
+ u32 l4_dport:16;
+#else
+ u32 l4_dport:16;
+ u32 l4_sport:16;
+#endif
+ } bs;
+ u32 value;
+ } dw3;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 l4_protocol:8;
+ u32 rsvd0:8;
+ u32 seg_id:10;
+ u32 rsvd1:6;
+#else
+ u32 rsvd1:6;
+ u32 seg_id:10;
+ u32 rsvd0:8;
+ u32 l4_protocol:8;
+#endif
+ } bs;
+ u32 value;
+ } dw4;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 tid1:2;
+ u32 rsvd:14;
+ u32 padding:16;
+#else
+ u32 padding:16;
+ u32 rsvd:14;
+ u32 tid1:2;
+#endif
+ } bs;
+ u32 value;
+ } dw5;
+} sml_acl_ipv4_key_s;
+
+typedef struct tag_sml_acl_ipv6_key {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* for alignment; match_key_type and the
+ * following fields are the KEY value
+ */
+ u32 padding:16;
+ u32 tid0:2;
+ u32 match_key_type:3; /* matching type */
+ u32 rsvd:11; /* reserved field */
+#else
+ u32 rsvd:11;
+ u32 match_key_type:3;
+ u32 tid0:2;
+ u32 padding:16;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ /*dw1~dw4 */
+ u32 sipv6[4];
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 tid1:2;
+ u32 rsvd1:14;
+ u32 tid2:2;
+ u32 rsvd2:14;
+#else
+ u32 rsvd2:14;
+ u32 tid2:2;
+ u32 rsvd1:14;
+ u32 tid1:2;
+#endif
+ } bs;
+ u32 value;
+ } dw5;
+
+ /*dw6~dw9 */
+ u32 dipv6[4];
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 tid3:2;
+ u32 rsvd3:14;
+ u32 tid4:2;
+ u32 rsvd4:14;
+#else
+ u32 rsvd4:14;
+ u32 tid4:2;
+ u32 rsvd3:14;
+ u32 tid3:2;
+#endif
+ } bs;
+ u32 value;
+ } dw10;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 l4_sport:16;
+ u32 l4_dport:16;
+#else
+ u32 l4_dport:16;
+ u32 l4_sport:16;
+#endif
+ } bs;
+ u32 value;
+ } dw11;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 l4_protocol:8;
+ u32 rsvd0:8;
+ u32 seg_id:10;
+ u32 rsvd1:6;
+#else
+ u32 rsvd1:6;
+ u32 seg_id:10;
+ u32 rsvd0:8;
+ u32 l4_protocol:8;
+#endif
+ } bs;
+ u32 value;
+ } dw12;
+
+ u32 dw13;
+ u32 dw14;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 tid5:2;
+ u32 rsvd5:14;
+ u32 tid6:2;
+ u32 rsvd6:14;
+#else
+ u32 rsvd6:14;
+ u32 tid6:2;
+ u32 rsvd5:14;
+ u32 tid5:2;
+#endif
+ } bs;
+ u32 value;
+ } dw15;
+
+ u32 dw16;
+ u32 dw17;
+ u32 dw18;
+ u32 dw19;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 tid7:2;
+ u32 rsvd7:30;
+#else
+ u32 rsvd7:30;
+ u32 tid7:2;
+#endif
+ } bs;
+ u32 value;
+ } dw20;
+} sml_acl_ipv6_key_s;
+
+/**
+ * Struct name: sml_voq_map_table_s
+ * @brief: voq_map_table
+ * Description: VOQ mapping table
+ */
+typedef struct tag_sml_voq_map_table {
+ u16 voq_base[8];
+} sml_voq_map_table_s;
+
+/**
+ * Struct name: sml_rss_context_u
+ * @brief: rss_context
+ * Description: RSS context (hash type enables and default QPN)
+ */
+typedef union tag_sml_rss_context {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 udp_ipv4:1;
+ u32 udp_ipv6:1;
+ u32 ipv4:1;
+ u32 tcp_ipv4:1;
+ u32 ipv6:1;
+ u32 tcp_ipv6:1;
+ u32 ipv6_ext:1;
+ u32 tcp_ipv6_ext:1;
+ u32 valid:1;
+ u32 rsvd1:13;
+ u32 def_qpn:10;
+#else
+ u32 def_qpn:10;
+ u32 rsvd1:13;
+ u32 valid:1;
+ u32 tcp_ipv6_ext:1;
+ u32 ipv6_ext:1;
+ u32 tcp_ipv6:1;
+ u32 ipv6:1;
+ u32 tcp_ipv4:1;
+ u32 ipv4:1;
+ u32 udp_ipv6:1;
+ u32 udp_ipv4:1;
+#endif
+ } bs;
+
+ u32 value;
+} sml_rss_context_u;
+
+typedef struct tag_sml_rss_context_tbl {
+ sml_rss_context_u element[TBL_ID_RSS_CONTEXT_NUM];
+} sml_rss_context_tbl_s;
+
+/**
+ * Struct name: sml_rss_hash_u
+ * @brief: rss_hash
+ * Description: RSS hash indirection table (256 RQ indices)
+ */
+typedef union tag_sml_rss_hash {
+ u8 rq_index[256];
+} sml_rss_hash_u;
+
+typedef struct tag_sml_rss_hash_tbl {
+ sml_rss_hash_u element[TBL_ID_RSS_HASH_NUM];
+} sml_rss_hash_tbl_s;
+
+/**
+ * Struct name: sml_lli_5tuple_key_s
+ * @brief: lli_5tuple_key
+ * Description: LLI 5-tuple key
+ */
+typedef struct tag_sml_lli_5tuple_key {
+ union {
+ struct {
+/** Define the struct bits */
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 src:5;
+ /* the tile needs to fill the Dest */
+ u32 rt:1;
+ u32 key_size:2;
+ /* determines which action the engine will take */
+ u32 profile_id:3;
+ /* indicates that the requestor expects
+ * to receive response data
+ */
+ u32 op_id:5;
+ u32 a:1;
+ u32 rsvd:12;
+ u32 vld:1;
+ u32 xy:1;
+ u32 at:1;
+#else
+ u32 at:1;
+ u32 xy:1;
+ u32 vld:1;
+ u32 rsvd:12;
+ u32 a:1;
+ /* indicates that the requestor expects
+ * to receive response data
+ */
+ u32 op_id:5;
+ /* determines which action the engine will take */
+ u32 profile_id:3;
+ u32 key_size:2;
+ u32 rt:1;
+ u32 src:5;
+#endif
+ } bs;
+
+/* Define an unsigned member */
+ u32 value;
+ } dw0;
+ union {
+ struct {
+ u32 rsvd:1;
+ /* the tile needs to fill the Dest */
+ u32 address:15;
+
+ u32 table_type:5;
+ u32 ip_type:1;
+ u32 func_id:10;
+ } bs;
+
+ u32 value;
+ } misc;
+
+ u32 src_ip[4];
+ u32 dst_ip[4];
+
+ u16 src_port;
+ u16 dst_port;
+
+ u8 protocol;
+ u8 tcp_flag;
+ u8 fcoe_rctl;
+ u8 fcoe_type;
+ u16 eth_type;
+} sml_lli_5tuple_key_s;
+
+/**
+ * Struct name: sml_lli_5tuple_rsp_s
+ * @brief: lli_5tuple_rsp
+ * Description: LLI 5-tuple lookup response
+ */
+typedef struct tag_sml_lli_5tuple_rsp {
+ union {
+ struct {
+ u32 state:4;
+ u32 rsvd:28;
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ u32 dw1;
+
+ union {
+ struct {
+ u32 frame_size:16;
+ u32 lli_en:8;
+ u32 rsvd:8;
+ } bs;
+
+ u32 value;
+ } dw2;
+
+ u32 dw3;
+} sml_lli_5tuple_rsp_s;
+
+/**
+ * Struct name: l2nic_rx_cqe_s
+ * @brief: l2nic_rx_cqe_s data structure
+ * Description: L2 NIC receive completion queue entry
+ */
+typedef struct tag_l2nic_rx_cqe {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rx_done:1;
+ u32 bp_en:1;
+ u32 rsvd1:6;
+ u32 lro_num:8;
+ u32 checksum_err:16;
+#else
+ u32 checksum_err:16;
+ u32 lro_num:8;
+ u32 rsvd1:6;
+ u32 bp_en:1;
+ u32 rx_done:1;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 length:16;
+ u32 vlan:16;
+#else
+ u32 vlan:16;
+ u32 length:16;
+#endif
+ } bs;
+ u32 value;
+ } dw1;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rss_type:8;
+ u32 rsvd0:2;
+ u32 vlan_offload_en:1;
+ u32 umbcast:2;
+ u32 rsvd1:7;
+ u32 pkt_types:12;
+#else
+ u32 pkt_types:12;
+ u32 rsvd1:7;
+ u32 umbcast:2;
+ u32 vlan_offload_en:1;
+ u32 rsvd0:2;
+ u32 rss_type:8;
+#endif
+ } bs;
+ u32 value;
+ } dw2;
+
+ union {
+ struct {
+ u32 rss_hash_value;
+ } bs;
+ u32 value;
+ } dw3;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 if_1588:1;
+ u32 if_tx_ts:1;
+ u32 if_rx_ts:1;
+ u32 rsvd:1;
+ u32 msg_1588_type:4;
+ u32 msg_1588_offset:8;
+ u32 tx_ts_seq:16;
+#else
+ u32 tx_ts_seq:16;
+ u32 msg_1588_offset:8;
+ u32 msg_1588_type:4;
+ u32 rsvd:1;
+ u32 if_rx_ts:1;
+ u32 if_tx_ts:1;
+ u32 if_1588:1;
+#endif
+ } bs;
+ u32 value;
+ } dw4;
+
+ union {
+ struct {
+ u32 msg_1588_ts;
+ } bs;
+
+ struct {
+ u32 rsvd0:12;
+ /* for ovs. traffic type: 0-default l2nic pkt,
+ * 1-fallback traffic, 2-miss upcall traffic,
+ * 3-command
+ */
+ u32 traffic_type:4;
+ /* for ovs. traffic from: vf_id,
+ * only support traffic_type=0(default l2nic)
+ * or 2(miss upcall)
+ */
+ u32 traffic_from:16;
+ } ovs_bs;
+
+ u32 value;
+ } dw5;
+
+ union {
+ struct {
+ u32 lro_ts;
+ } bs;
+ u32 value;
+ } dw6;
+
+ union {
+ struct {
+ u32 rsvd0;
+ } bs;
+
+ u32 localtag; /* for ovs */
+
+ u32 value;
+ } dw7;
+} l2nic_rx_cqe_s;
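+/* Receive-path sketch (illustrative): a driver polls dw0.bs.rx_done to
+ * detect a completed CQE, reads the packet length from dw1.bs.length,
+ * and uses dw1.bs.vlan only when dw2.bs.vlan_offload_en is set.
+ */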
+
+typedef union tag_sml_global_queue_tbl_elem {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 src_tag_l:16;
+ u32 local_qid:8;
+ u32 rsvd:8;
+#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
+ u32 rsvd:8;
+ u32 local_qid:8;
+ u32 src_tag_l:16;
+#endif
+ } bs;
+
+ u32 value;
+} sml_global_queue_tbl_elem_u;
+
+typedef struct tag_sml_global_queue_tbl {
+ sml_global_queue_tbl_elem_u element[TBL_ID_GLOBAL_QUEUE_NUM];
+} sml_global_queue_tbl_s;
+
+typedef struct tag_sml_dfx_log_tbl {
+ u32 wr_init_pc_h32; /* initial value of write_pc */
+ u32 wr_init_pc_l32;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 state:8;
+ u32 func_en:1;
+ u32 srctag:12;
+ u32 max_num:11; /* data block highest value */
+#else
+ u32 max_num:11;
+ u32 srctag:12;
+ u32 func_en:1;
+ u32 state:8;
+#endif
+ } bs;
+ u32 value;
+ } dw2;
+
+ u32 ci_index;
+} sml_dfx_log_tbl_s;
+
+typedef struct tag_sml_glb_capture_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 valid:1;
+ u32 max_num:15;
+ u32 rsvd:16;
+#else
+ u32 rsvd:16;
+ u32 max_num:15;
+ u32 valid:1;
+#endif
+ } bs;
+ u32 value;
+ } dw0;
+
+ u32 discard_addr_h32;
+ u32 discard_addr_l32;
+
+ u32 rsvd0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 valid:1;
+ u32 mode:5;
+ u32 direct:2;
+ u32 offset:8;
+ u32 cos:3;
+ u32 max_num:13;
+#else
+ u32 max_num:13;
+ u32 cos:3;
+ u32 offset:8;
+ u32 direct:2;
+ u32 mode:5;
+ u32 valid:1;
+#endif
+ } bs;
+ u32 value;
+ } dw4;
+
+ u32 data_vlan;
+
+ u32 condition_addr_h32;
+ u32 condition_addr_l32;
+
+} sml_glb_capture_tbl_s;
+
+typedef struct tag_sml_cqe_addr_tbl {
+ u32 cqe_first_addr_h32;
+ u32 cqe_first_addr_l32;
+ u32 cqe_last_addr_h32;
+ u32 cqe_last_addr_l32;
+
+} sml_cqe_addr_tbl_s;
+
+/**
+ * Struct name: sml_ucode_exec_info_s
+ * @brief: ucode exception info Table
+ * Description: microcode exception information table
+ */
+typedef struct tag_ucode_exec_info_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 wptr_cpb_ack_str:4;
+ u32 mem_cpb_ack_cnums_dma:4;
+ u32 mem_cpb_ack_cmd_mode:2;
+ u32 pr_ret_vld:1;
+ u32 oeid_pd_pkt:1;
+ u32 rptr_cmd:4;
+ u32 wptr_cmd:4;
+ u32 src_tag_l:12;
+#else
+ u32 src_tag_l:12;
+ u32 wptr_cmd:4;
+ u32 rptr_cmd:4;
+ u32 oeid_pd_pkt:1;
+ u32 pr_ret_vld:1;
+ u32 mem_cpb_ack_cmd_mode:2;
+ u32 mem_cpb_ack_cnums_dma:4;
+ u32 wptr_cpb_ack_str:4;
+#endif
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 fq:16;
+ u32 exception_type:4;
+ u32 rptr_cpb_ack_str:4;
+ u32 header_oeid:8;
+#else
+ u32 header_oeid:8;
+ u32 rptr_cpb_ack_str:4;
+ u32 exception_type:4;
+ u32 fq:16;
+#endif
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 oeid_pd_data_l32;
+ u32 oeid_pd_data_m32;
+} sml_ucode_exec_info_s;
+
+typedef struct rq_iq_mapping_tbl {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rqid:16;
+ u32 iqid:8;
+ u32 rsvd:8;
+#else
+ u32 rsvd:8;
+ u32 iqid:8;
+ u32 rqid:16;
+#endif
+ } bs;
+ u32 value;
+ } dw[4];
+} sml_rq_iq_mapping_tbl_s;
+
+/* nic_ucode_rq_ctx table define
+ */
+typedef struct nic_ucode_rq_ctx {
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 max_count:10;
+ u32 cqe_tmpl:6;
+ u32 pkt_tmpl:6;
+ u32 wqe_tmpl:6;
+ u32 psge_valid:1;
+ u32 rsvd1:1;
+ u32 owner:1;
+ u32 ceq_en:1;
+#else
+ u32 ceq_en:1;
+ u32 owner:1;
+ u32 rsvd1:1;
+ u32 psge_valid:1;
+ u32 wqe_tmpl:6;
+ u32 pkt_tmpl:6;
+ u32 cqe_tmpl:6;
+ u32 max_count:10;
+#endif
+ } bs;
+ u32 dw0;
+ };
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* interrupt number the L2NIC engine uses to
+ * signal SW, if it generates an interrupt
+ * instead of a CEQ
+ */
+ u32 int_num:10;
+ u32 ceq_count:10;
+ /* producer index */
+ u32 pi:12;
+#else
+ /* producer index */
+ u32 pi:12;
+ u32 ceq_count:10;
+ /* interrupt number the L2NIC engine uses to
+ * signal SW, if it generates an interrupt
+ * instead of a CEQ
+ */
+ u32 int_num:10;
+#endif
+ } bs0;
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* CEQ arm; the L2NIC engine clears it after
+ * sending a ceq, and the driver should set it
+ * via CMDQ after receiving all packets.
+ */
+ u32 ceq_arm:1;
+ u32 eq_id:5;
+ u32 rsvd2:4;
+ u32 ceq_count:10;
+ /* producer index */
+ u32 pi:12;
+#else
+ /* producer index */
+ u32 pi:12;
+ u32 ceq_count:10;
+ u32 rsvd2:4;
+ u32 eq_id:5;
+ /* CEQ arm; the L2NIC engine clears it after
+ * sending a ceq, and the driver should set it
+ * via CMDQ after receiving all packets.
+ */
+ u32 ceq_arm:1;
+#endif
+ } bs1;
+ u32 dw1;
+ };
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ /* consumer index */
+ u32 ci:12;
+ /* WQE page address the current CI points to, high part */
+ u32 ci_wqe_page_addr_hi:20;
+#else
+ /* WQE page address the current CI points to, high part */
+ u32 ci_wqe_page_addr_hi:20;
+ /* consumer index */
+ u32 ci:12;
+#endif
+ } bs2;
+ u32 dw2;
+ };
+
+ /* WQE page address the current CI points to, low part */
+ u32 ci_wqe_page_addr_lo;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 prefetch_min:7;
+ u32 prefetch_max:11;
+ u32 prefetch_cache_threshold:14;
+#else
+ u32 prefetch_cache_threshold:14;
+ u32 prefetch_max:11;
+ u32 prefetch_min:7;
+#endif
+ } bs3;
+ u32 dw3;
+ };
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd3:31;
+ /* ownership of WQE */
+ u32 prefetch_owner:1;
+#else
+ /* ownership of WQE */
+ u32 prefetch_owner:1;
+ u32 rsvd3:31;
+#endif
+ } bs4;
+ u32 dw4;
+ };
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 prefetch_ci:12;
+ /* high part */
+ u32 prefetch_ci_wqe_page_addr_hi:20;
+#else
+ /* high part */
+ u32 prefetch_ci_wqe_page_addr_hi:20;
+ u32 prefetch_ci:12;
+#endif
+ } bs5;
+ u32 dw5;
+ };
+
+ /* low part */
+ u32 prefetch_ci_wqe_page_addr_lo;
+ /* host mem GPA, high part */
+ u32 pi_gpa_hi;
+ /* host mem GPA, low part */
+ u32 pi_gpa_lo;
+
+ union {
+ struct {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u32 rsvd4:9;
+ u32 ci_cla_tbl_addr_hi:23;
+#else
+ u32 ci_cla_tbl_addr_hi:23;
+ u32 rsvd4:9;
+#endif
+ } bs6;
+ u32 dw6;
+ };
+
+ u32 ci_cla_tbl_addr_lo;
+
+} nic_ucode_rq_ctx_s;
+
+#define LRO_TSO_SPACE_SIZE (240) /* (15 * 16) */
+#define RQ_CTX_SIZE (48)
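+/* RQ_CTX_SIZE matches the layout of nic_ucode_rq_ctx_s above:
+ * twelve 32-bit words, i.e. 12 * 4 = 48 bytes.
+ */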
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif /* __cplusplus */
+#endif /* __SML_TABLE_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
b/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
new file mode 100644
index 000000000000..39d0516c0696
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __SML_TABLE_PUB_H__
+#define __SML_TABLE_PUB_H__
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif
+#endif /* __cplusplus */
+
+/* Non-FPGA (ESL/EMU/EDA) specification */
+#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
+/* ER specification */
+#define L2_ER_SPEC (16)
+
+/* Entry specification */
+#define TBL_ID_FUNC_CFG_SPEC (512)
+#define TBL_ID_PORT_CFG_SPEC (16)
+#define TBL_ID_MAC_SPEC (4096)
+#define TBL_ID_MULTICAST_SPEC (1024)
+#define TBL_ID_TRUNK_SPEC (256)
+#define TBL_ID_ELB_SPEC (18432)
+#define TBL_ID_TAGGEDLIST_SPEC (80)
+#define TBL_ID_UNTAGGEDLIST_SPEC (16)
+
+/* VLAN specification */
+#define VSW_VLAN_SPEC (4096)
+
+#else /* FPGA scenario specifications */
+
+/* ER specification */
+#define L2_ER_SPEC (4)
+
+/* Entry specification */
+#define TBL_ID_FUNC_CFG_SPEC (64)
+#define TBL_ID_PORT_CFG_SPEC (16)
+#define TBL_ID_MAC_SPEC (256)
+#define TBL_ID_MULTICAST_SPEC (32)
+#define TBL_ID_TRUNK_SPEC (16)
+#define TBL_ID_ELB_SPEC (1152)
+#define TBL_ID_TAGGEDLIST_SPEC (20)
+#define TBL_ID_UNTAGGEDLIST_SPEC (4)
+
+/* VLAN specification */
+#define VSW_VLAN_SPEC (1024)
+#endif
+
+/**
+ * Number of entries elements defined
+ */
+#define TBL_ID_ELB_ENTRY_ELEM_NUM 2
+#define TBL_ID_VLAN_ENTRY_ELEM_NUM 8
+#define TBL_ID_MULTICAST_ENTRY_ELEM_NUM 2
+#define TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM 32
+#define TBL_ID_TAGGEDLIST_BITMAP32_NUM 4
+#define TBL_ID_UNTAGGEDLIST_BITMAP32_NUM 4
+#define TBL_ID_GLOBAL_QUEUE_NUM 4
+#define TBL_ID_RSS_CONTEXT_NUM 4
+#define TBL_ID_RSS_HASH_NUM 4
+
+/**
+ * NIC receiving mode defined
+ */
+#define NIC_RX_MODE_UC 0x01 /* 0b00001 */
+#define NIC_RX_MODE_MC 0x02 /* 0b00010 */
+#define NIC_RX_MODE_BC 0x04 /* 0b00100 */
+#define NIC_RX_MODE_MC_ALL 0x08 /* 0b01000 */
+#define NIC_RX_MODE_PROMISC 0x10 /* 0b10000 */
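+/* For example, a function receiving unicast, broadcast and all multicast
+ * traffic carries nic_rx_mode = NIC_RX_MODE_UC | NIC_RX_MODE_BC |
+ * NIC_RX_MODE_MC_ALL (0x0D) in its function configuration table entry.
+ */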
+
+/**
+ * Maximum number of HCAR
+ */
+#define QOS_MAX_HCAR_NUM (12)
+
+/**
+ * VLAN Table, Multicast Table, ELB Table Definitions
+ * The Table index and sub id index
+ */
+#define VSW_DEFAULT_VLAN0 (0)
+#define INVALID_ELB_INDEX (0)
+
+#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
+/* ESL/EMU/EDA supports 16 ER * 4K VLAN; 1 entry stores 8 vlans */
+#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
+ ((((er_id) & 0xF) << 9) | (((vlan_id) & 0xFFF) >> 3))
+#else
+/* FPGA supports only 4 ER * 1K VLAN; 1 entry stores 8 vlans */
+#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
+ ((((er_id) & 0x3) << 7) | (((vlan_id) & 0x3FF) >> 3))
+#endif
+#define GET_VLAN_ENTRY_SUBID(vlan_id) ((vlan_id) & 0x7)
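+/* Worked example (non-FPGA case): for er_id 3 and vlan_id 100,
+ * GET_VLAN_TABLE_INDEX(3, 100) = (3 << 9) | (100 >> 3) = 0x60C and
+ * GET_VLAN_ENTRY_SUBID(100) = 4, i.e. vlan 100 lives in element 4 of
+ * entry 0x60C.
+ */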
+
+#define GET_MULTICAST_TABLE_INDEX(mc_id) ((mc_id) >> 1)
+#define GET_MULTICAST_ENTRY_SUBID(mc_id) ((mc_id) & 0x1)
+
+#define GET_ELB_TABLE_INDEX(elb_id) ((elb_id) >> 1)
+#define GET_ELB_ENTRY_SUBID(elb_id) ((elb_id) & 0x1)
+
+/**
+ * taggedlist_table and untaggedlist_table access offset calculation
+ */
+#define GET_TAGLIST_TABLE_INDEX(list_id, vlan_id) \
+ (((list_id) << 5) | (((vlan_id) & 0xFFF) >> 7))
+#define GET_TAGLIST_TABLE_BITMAP_IDX(vlan_id) (((vlan_id) >> 5) & 0x3)
+#define GET_TAGLIST_TABLE_VLAN_BIT(vlan_id) \
+ (0x1UL << ((vlan_id) & 0x1F))
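+/* Worked example: for list_id 2 and vlan_id 300,
+ * GET_TAGLIST_TABLE_INDEX(2, 300) = (2 << 5) | (300 >> 7) = 66,
+ * GET_TAGLIST_TABLE_BITMAP_IDX(300) = 1 and
+ * GET_TAGLIST_TABLE_VLAN_BIT(300) = 1 << 12 (bit 12 of bitmap[1]).
+ */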
+
+#define TRUNK_FWDID_NOPORT 0xFFFF
+
+/**
+ * MAC type definition
+ */
+typedef enum {
+ MAC_TYPE_UC = 0,
+ MAC_TYPE_BC,
+ MAC_TYPE_MC,
+ MAC_TYPE_RSV,
+} mac_type_e;
+
+/**
+ * Ethernet port definition
+ */
+typedef enum {
+ MAG_ETH_PORT0 = 0,
+ MAG_ETH_PORT1,
+ MAG_ETH_PORT2,
+ MAG_ETH_PORT3,
+ MAG_ETH_PORT4,
+ MAG_ETH_PORT5,
+ MAG_ETH_PORT6,
+ MAG_ETH_PORT7,
+ MAG_ETH_PORT8,
+ MAG_ETH_PORT9,
+} mag_eth_port_e;
+
+/**
+ * vlan filter type defined
+ */
+typedef enum {
+ VSW_VLAN_MODE_ALL = 0,
+ VSW_VLAN_MODE_ACCESS,
+ VSW_VLAN_MODE_TRUNK,
+ VSW_VLAN_MODE_HYBRID,
+ VSW_VLAN_MODE_QINQ,
+ VSW_VLAN_MODE_MAX,
+} vsw_vlan_mode_e;
+
+/**
+ * MAC table query forwarding port type definition
+ */
+typedef enum {
+ VSW_FWD_TYPE_FUNCTION = 0, /* forward type function */
+ VSW_FWD_TYPE_VMDQ, /* forward type function-queue(vmdq) */
+ VSW_FWD_TYPE_PORT, /* forward type port */
+ VSW_FWD_TYPE_FIC, /* forward type fic */
+ VSW_FWD_TYPE_TRUNK, /* forward type trunk */
+ VSW_FWD_TYPE_DP, /* forward type DP */
+ VSW_FWD_TYPE_MC, /* forward type multicast */
+
+ /* START: is not used and has to be removed */
+ VSW_FWD_TYPE_BC, /* forward type broadcast */
+ VSW_FWD_TYPE_PF, /* forward type pf */
+ /* END: is not used and has to be removed */
+
+ VSW_FWD_TYPE_NULL, /* forward type null */
+} vsw_fwd_type_e;
+
+/**
+ * Eth-Trunk port aggregation mode
+ */
+typedef enum {
+ VSW_ETRK_MODE_STANDBY,
+ VSW_ETRK_MODE_SMAC,
+ VSW_ETRK_MODE_DMAC,
+ VSW_ETRK_MODE_SMACDMAC,
+ VSW_ETRK_MODE_SIP,
+ VSW_ETRK_MODE_DIP,
+ VSW_ETRK_MODE_SIPDIP,
+ VSW_ETRK_MODE_5TUPLES,
+ VSW_ETRK_MODE_LACP,
+ VSW_ETRK_MODE_MAX,
+} vsw_etrk_mode_e;
+
+/**
+ * Eth-Trunk port aggregation mode
+ */
+typedef enum {
+ TRUNK_MODE_STANDBY,
+ TRUNK_MODE_SMAC,
+ TRUNK_MODE_DMAC,
+ TRUNK_MODE_SMACDMAC,
+ TRUNK_MODE_SIP,
+ TRUNK_MODE_DIP,
+ TRUNK_MODE_SIPDIP,
+ TRUNK_MODE_5TUPLES,
+ TRUNK_MODE_SIPV6,
+ TRUNK_MODE_DIPV6,
+ TRUNK_MODE_SIPDIPV6,
+ TRUNK_MODE_5TUPLESV6,
+ TRUNK_MODE_LACP,
+} trunk_mode_s;
+
+/* ACL key type */
+enum {
+ ACL_KEY_IPV4 = 0,
+ ACL_KEY_IPV6
+};
+
+/* ACL filter action */
+enum {
+ ACL_ACTION_PERMIT = 0,
+ ACL_ACTION_DENY
+};
+
+/* ACL action switch */
+enum {
+ ACL_ACTION_OFF = 0,
+ ACL_ACTION_ON,
+};
+
+/* ACL statistics action */
+enum {
+ ACL_ACTION_NO_COUNTER = 0,
+ ACL_ACTION_COUNT_PKT,
+ ACL_ACTION_COUNT_PKT_LEN,
+};
+
+/* ACL redirect action */
+enum {
+ ACL_ACTION_FORWAR_UP = 1,
+ ACL_ACTION_FORWAR_PORT,
+ ACL_ACTION_FORWAR_NEXT_HOP,
+ ACL_ACTION_FORWAR_OTHER,
+};
+
+enum {
+ CEQ_TIMER_STOP = 0,
+ CEQ_TIMER_START,
+};
+
+enum {
+ CEQ_API_DISPATCH = 0,
+ CEQ_API_NOT_DISPATCH,
+};
+
+enum {
+ CEQ_MODE = 1,
+ INT_MODE,
+};
+
+enum {
+ ER_MODE_VEB,
+ ER_MODE_VEPA,
+ ER_MODE_MULTI,
+ ER_MODE_NULL,
+};
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif
+#endif /* __cplusplus */
+#endif /* __SML_TABLE_PUB_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.c
b/drivers/net/ethernet/huawei/hinic/hinic_wq.c
new file mode 100644
index 000000000000..43d34ea72363
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_wq.h"
+#include "hinic_qe_def.h"
+
+#define WQS_MAX_NUM_BLOCKS 128
+#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
+ sizeof((wqs)->free_blocks[0]))
+
+static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx,
+ u32 *block_idx);
+
+static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx,
+ u32 block_idx);
+
+static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr,
+ u64 **shadow_vaddr, u64 page_sz)
+{
+ dma_addr_t dma_addr = 0;
+
+ *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr,
+ GFP_KERNEL);
+ if (!*vaddr) {
+ sdk_err(handle, "Failed to allocate dma to wqs page\n");
+ return -ENOMEM;
+ }
+
+ if (!ADDR_4K_ALIGNED(dma_addr)) {
+ sdk_err(handle, "Cla is not 4k aligned!\n");
+ goto shadow_vaddr_err;
+ }
+
+ *paddr = (u64)dma_addr;
+
+ /* use vzalloc for big mem, shadow_vaddr only used at initialization */
+ *shadow_vaddr = vzalloc(page_sz);
+ if (!*shadow_vaddr) {
+ sdk_err(handle, "Failed to allocate shadow page vaddr\n");
+ goto shadow_vaddr_err;
+ }
+
+ return 0;
+
+shadow_vaddr_err:
+ dma_free_coherent(handle, page_sz, *vaddr, dma_addr);
+ return -ENOMEM;
+}
+
+static int wqs_allocate_page(struct hinic_wqs *wqs, u32 page_idx)
+{
+ return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx],
+ &wqs->page_paddr[page_idx],
+ &wqs->shadow_page_vaddr[page_idx],
+ WQS_PAGE_SIZE);
+}
+
+static void wqs_free_page(struct hinic_wqs *wqs, u32 page_idx)
+{
+ dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE,
+ wqs->page_vaddr[page_idx],
+ (dma_addr_t)wqs->page_paddr[page_idx]);
+ vfree(wqs->shadow_page_vaddr[page_idx]);
+}
+
+static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ return queue_alloc_page(cmdq_pages->dev_hdl,
+ &cmdq_pages->cmdq_page_vaddr,
+ &cmdq_pages->cmdq_page_paddr,
+ &cmdq_pages->cmdq_shadow_page_vaddr,
+ CMDQ_PAGE_SIZE);
+}
+
+static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE,
+ cmdq_pages->cmdq_page_vaddr,
+ (dma_addr_t)cmdq_pages->cmdq_page_paddr);
+ vfree(cmdq_pages->cmdq_shadow_page_vaddr);
+}
+
+static int alloc_wqes_shadow(struct hinic_wq *wq)
+{
+ u64 size;
+
+ /* no shadow wqe is needed when a wqe fits in a single wqebb */
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return 0;
+
+ size = (u64)wq->num_q_pages * wq->max_wqe_size;
+ wq->shadow_wqe = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_wqe) {
+ pr_err("Failed to allocate shadow wqe\n");
+ return -ENOMEM;
+ }
+
+ size = wq->num_q_pages * sizeof(wq->prod_idx);
+ wq->shadow_idx = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_idx) {
+ pr_err("Failed to allocate shadow index\n");
+ goto shadow_idx_err;
+ }
+
+ return 0;
+
+shadow_idx_err:
+ kfree(wq->shadow_wqe);
+ return -ENOMEM;
+}
+
+static void free_wqes_shadow(struct hinic_wq *wq)
+{
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return;
+
+ kfree(wq->shadow_idx);
+ kfree(wq->shadow_wqe);
+}
+
+static void free_wq_pages(void *handle, struct hinic_wq *wq,
+ u32 num_q_pages)
+{
+ u32 i;
+
+ for (i = 0; i < num_q_pages; i++)
+ hinic_dma_free_coherent_align(handle, &wq->mem_align[i]);
+
+ free_wqes_shadow(wq);
+
+ wq->block_vaddr = NULL;
+ wq->shadow_block_vaddr = NULL;
+
+ kfree(wq->mem_align);
+}
+
+static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq)
+{
+ struct hinic_dma_addr_align *mem_align;
+ u64 *vaddr, *paddr;
+ u32 i, num_q_pages;
+ int err;
+
+ vaddr = wq->shadow_block_vaddr;
+ paddr = wq->block_vaddr;
+
+ num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
+ if (num_q_pages > WQ_MAX_PAGES) {
+ sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ if (num_q_pages & (num_q_pages - 1)) {
+ sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ wq->num_q_pages = num_q_pages;
+
+ err = alloc_wqes_shadow(wq);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wqe shadow\n");
+ return err;
+ }
+
+ wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align),
+ GFP_KERNEL);
+ if (!wq->mem_align) {
+ sdk_err(dev_hdl, "Failed to allocate mem_align\n");
+ free_wqes_shadow(wq);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_q_pages; i++) {
+ mem_align = &wq->mem_align[i];
+ err = hinic_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size,
+ wq->wq_page_size,
+ GFP_KERNEL, mem_align);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wq page\n");
+ goto alloc_wq_pages_err;
+ }
+
+ *paddr = cpu_to_be64(mem_align->align_paddr);
+ *vaddr = (u64)mem_align->align_vaddr;
+
+ paddr++;
+ vaddr++;
+ }
+
+ return 0;
+
+alloc_wq_pages_err:
+ free_wq_pages(dev_hdl, wq, i);
+
+ return -ENOMEM;
+}
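+/* Design note: alloc_wq_pages() fills two parallel tables. The block
+ * visible to hardware (wq->block_vaddr) receives the page physical
+ * addresses in big-endian (cpu_to_be64), while the shadow block keeps
+ * the CPU virtual addresses so the driver can reach the WQE pages.
+ */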
+
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size)
+{
+ u32 num_wqebbs_per_page;
+ int err;
+
+ if (wqebb_size == 0) {
+ sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n");
+ return -EINVAL;
+ }
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ if (wq_page_size & (wq_page_size - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n",
+ wq_page_size);
+ return -EINVAL;
+ }
+
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n",
+ num_wqebbs_per_page);
+ return -EINVAL;
+ }
+
+ err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n");
+ return err;
+ }
+
+ wq->wqebb_size = wqebb_size;
+ wq->wq_page_size = wq_page_size;
+ wq->q_depth = q_depth;
+ wq->max_wqe_size = max_wqe_size;
+ wq->num_wqebbs_per_page = num_wqebbs_per_page;
+
+ wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page);
+
+ wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
+ wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
+ wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
+
+ err = alloc_wq_pages(wqs->dev_hdl, wq);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n");
+ goto alloc_wq_pages_err;
+ }
+
+ atomic_set(&wq->delta, q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+ wq->mask = q_depth - 1;
+
+ return 0;
+
+alloc_wq_pages_err:
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+ return err;
+}
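+/* Usage sketch (illustrative values, assuming an initialized wqs):
+ * allocate a 1024-deep queue of 64-byte WQEBBs in 4K pages, then
+ * release it:
+ *
+ *	struct hinic_wq wq;
+ *
+ *	if (!hinic_wq_allocate(&wqs, &wq, 64, 4096, 1024, 64))
+ *		hinic_wq_free(&wqs, &wq);
+ */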
+
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
+{
+ free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages);
+
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+}
+
+static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx,
+ u32 *block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ if (wqs->num_free_blks <= 0) {
+ spin_unlock(&wqs->alloc_blocks_lock);
+ return -ENOMEM;
+ }
+ wqs->num_free_blks--;
+
+ pos = wqs->alloc_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ *page_idx = wqs->free_blocks[pos].page_idx;
+ *block_idx = wqs->free_blocks[pos].block_idx;
+
+ wqs->free_blocks[pos].page_idx = 0xFFFFFFFF;
+ wqs->free_blocks[pos].block_idx = 0xFFFFFFFF;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+
+ return 0;
+}
+
+static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx,
+ u32 block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ wqs->num_free_blks++;
+
+ pos = wqs->return_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = block_idx;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+}
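+/* The free-block pool behaves as a fixed ring: wqs_next_block() consumes
+ * entries at alloc_blk_pos and wqs_return_block() produces them at
+ * return_blk_pos, both wrapping on WQS_MAX_NUM_BLOCKS (a power of two,
+ * hence the mask). num_free_blks, updated under alloc_blocks_lock,
+ * keeps the two cursors consistent.
+ */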
+
+static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
+{
+ u32 page_idx, blk_idx, pos = 0;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = blk_idx;
+ pos++;
+ }
+ }
+
+ wqs->alloc_blk_pos = 0;
+ wqs->return_blk_pos = 0;
+ wqs->num_free_blks = WQS_MAX_NUM_BLOCKS;
+ spin_lock_init(&wqs->alloc_blocks_lock);
+}
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
+{
+ u64 *block_vaddr;
+ u32 pg_idx;
+
+ block_vaddr = wq->shadow_block_vaddr;
+
+ atomic_set(&wq->delta, wq->q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++)
+ memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size);
+}
+
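+/* Unlike normal work queues, all command queue WQs share the single
+ * dedicated page allocated by cmdq_allocate_page(): page_idx is always
+ * 0 and block_idx is simply the cmdq number.
+ */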
+int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size)
+{
+ int i, j, err = -ENOMEM;
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't a power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ cmdq_pages->dev_hdl = dev_hdl;
+
+ err = cmdq_allocate_page(cmdq_pages);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate CMDQ page\n");
+ return err;
+ }
+
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].page_idx = 0;
+ wq[i].block_idx = (u32)i;
+ wq[i].wqebb_size = wqebb_size;
+ wq[i].wq_page_size = wq_page_size;
+ wq[i].q_depth = q_depth;
+ wq[i].max_wqe_size = max_wqe_size;
+ wq[i].num_wqebbs_per_page =
+ ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ wq[i].wqebbs_per_page_shift =
+ (u32)ilog2(wq[i].num_wqebbs_per_page);
+
+ wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
+ wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
+ wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
+
+ err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n");
+ goto cmdq_block_err;
+ }
+
+ atomic_set(&wq[i].delta, q_depth);
+ wq[i].cons_idx = 0;
+ wq[i].prod_idx = 0;
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+cmdq_block_err:
+ for (j = 0; j < i; j++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+ return err;
+}
+
+void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+}
+
+static int alloc_page_addr(struct hinic_wqs *wqs)
+{
+ u64 size = wqs->num_pages * sizeof(*wqs->page_paddr);
+
+ wqs->page_paddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_paddr)
+ return -ENOMEM;
+
+ size = wqs->num_pages * sizeof(*wqs->page_vaddr);
+ wqs->page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_vaddr)
+ goto page_vaddr_err;
+
+ size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
+ wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->shadow_page_vaddr)
+ goto page_shadow_vaddr_err;
+
+ return 0;
+
+page_shadow_vaddr_err:
+ kfree(wqs->page_vaddr);
+
+page_vaddr_err:
+ kfree(wqs->page_paddr);
+ return -ENOMEM;
+}
+
+static void free_page_addr(struct hinic_wqs *wqs)
+{
+ kfree(wqs->shadow_page_vaddr);
+ kfree(wqs->page_vaddr);
+ kfree(wqs->page_paddr);
+}
+
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl)
+{
+ u32 i, page_idx;
+ int err;
+
+ wqs->dev_hdl = dev_hdl;
+ wqs->num_pages = WQ_NUM_PAGES(num_wqs);
+
+ if (alloc_page_addr(wqs)) {
+ sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n");
+ return -ENOMEM;
+ }
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ err = wqs_allocate_page(wqs, page_idx);
+ if (err) {
+ sdk_err(dev_hdl, "Failed wq page allocation\n");
+ goto wq_allocate_page_err;
+ }
+ }
+
+ wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL);
+ if (!wqs->free_blocks) {
+ err = -ENOMEM;
+ goto alloc_blocks_err;
+ }
+
+ init_wqs_blocks_arr(wqs);
+ return 0;
+
+alloc_blocks_err:
+wq_allocate_page_err:
+ for (i = 0; i < page_idx; i++)
+ wqs_free_page(wqs, i);
+
+ free_page_addr(wqs);
+ return err;
+}
+
+void hinic_wqs_free(struct hinic_wqs *wqs)
+{
+ u32 page_idx;
+
+ spin_lock_deinit(&wqs->alloc_blocks_lock);
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
+ wqs_free_page(wqs, page_idx);
+
+ free_page_addr(wqs);
+ kfree(wqs->free_blocks);
+}
+
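+/* A WQE that wraps past the end of a WQ page is not contiguous in
+ * memory, so it is staged in a per-page shadow buffer. These helpers
+ * copy the WQEBBs between the shadow buffer and their real, possibly
+ * non-contiguous, locations in the queue pages.
+ */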
+static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 prod_idx)
+{
+ u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
+ u32 i, offset;
+ u16 idx;
+
+ for (i = 0; i < (u32)num_wqebbs; i++) {
+ offset = i * wq->wqebb_size;
+ shadow_wqebb_addr = (u8 *)shadow_addr + offset;
+
+ idx = MASKED_WQE_IDX(wq, prod_idx + i);
+ wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
+ wqebb_addr = wqe_page_addr +
+ WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
+
+ memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size);
+ }
+}
+
+static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 prod_idx)
+{
+ u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
+ u32 i, offset;
+ u16 idx;
+
+ for (i = 0; i < (u32)num_wqebbs; i++) {
+ offset = i * wq->wqebb_size;
+ shadow_wqebb_addr = (u8 *)shadow_addr + offset;
+
+ idx = MASKED_WQE_IDX(wq, prod_idx + i);
+ wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
+ wqebb_addr = wqe_page_addr +
+ WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
+
+ memcpy(wqebb_addr, shadow_wqebb_addr, wq->wqebb_size);
+ }
+}
+
+void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index)
+{
+ return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index);
+}
+
+u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq)
+{
+ return be64_to_cpu(*wq->block_vaddr);
+}
+
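+/* Reserve num_wqebbs WQEBBs and return the address to write the WQE to.
+ * wq->delta counts the free WQEBBs: the atomic_sub_return() both checks
+ * for space and reserves it in one step. If the WQE would cross a page
+ * boundary, a shadow buffer is returned instead and the caller must
+ * flush it with hinic_write_wqe().
+ */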
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_prod_idx, end_prod_idx;
+
+ if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) {
+ atomic_add(num_wqebbs, &wq->delta);
+ return NULL;
+ }
+
+ /* use the original curr_pi and end_pi; no queue-depth masking is
+ * needed here because WQE_PAGE_NUM already masks with the number
+ * of queue pages
+ */
+ curr_prod_idx = (u16)wq->prod_idx;
+ wq->prod_idx += num_wqebbs;
+
+ /* the end producer index should point to the last wqebb of the wqe,
+ * hence the minus 1
+ */
+ end_prod_idx = (u16)wq->prod_idx - 1;
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
+
+ *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ /* even with a single page we still need the shadow wqe when the
+ * wqe rolls over the page boundary
+ */
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ wq->shadow_idx[curr_pg] = *prod_idx;
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
+}
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
+{
+ atomic_add(num_wqebbs, &wq->delta);
+ wq->cons_idx += num_wqebbs;
+}
+
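+/* Peek at the WQE at the current consumer index without releasing it;
+ * hinic_put_wqe() frees the WQEBBs afterwards. A WQE that wraps a page
+ * boundary is returned as a contiguous shadow copy.
+ */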
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_cons_idx, end_cons_idx;
+
+ if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
+ return NULL;
+
+ curr_cons_idx = (u16)wq->cons_idx;
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+ end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+ if (curr_pg != end_pg) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
+
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
+}
+
+static inline int wqe_shadow(struct hinic_wq *wq, void *wqe)
+{
+ void *end_wqe_shadow_addr;
+ u32 wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
+
+ end_wqe_shadow_addr = &wq->shadow_wqe[wqe_shadow_size];
+
+ return WQE_IN_RANGE(wqe, wq->shadow_wqe, end_wqe_shadow_addr);
+}
+
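+/* If the caller filled in a shadow WQE (see hinic_get_wqe()), copy it
+ * back to its real location in the queue pages; a WQE written directly
+ * into the queue pages needs no extra work.
+ */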
+void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs)
+{
+ u16 curr_pg;
+ u16 prod_idx;
+
+ if (wqe_shadow(wq, wqe)) {
+ curr_pg = WQE_SHADOW_PAGE(wq, wqe);
+ prod_idx = wq->shadow_idx[curr_pg];
+
+ copy_wqe_from_shadow(wq, wqe, num_wqebbs, prod_idx);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_wq.h
new file mode 100644
index 000000000000..88a07d80b4dd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_WQ_H
+#define HINIC_WQ_H
+
+struct hinic_free_block {
+ u32 page_idx;
+ u32 block_idx;
+};
+
+struct hinic_wq {
+ /* The addresses are 64 bit in the HW */
+ u64 block_paddr;
+ u64 *shadow_block_vaddr;
+ u64 *block_vaddr;
+
+ u32 wqebb_size;
+ u32 wq_page_size;
+ u16 q_depth;
+ u32 max_wqe_size;
+ u32 num_wqebbs_per_page;
+
+ /* performance: replace mul/div with shifts;
+ * num_wqebbs_per_page must be a power of 2
+ */
+ u32 wqebbs_per_page_shift;
+ u32 page_idx;
+ u32 block_idx;
+
+ u32 num_q_pages;
+
+ struct hinic_dma_addr_align *mem_align;
+
+ int cons_idx;
+ int prod_idx;
+
+ atomic_t delta;
+ u16 mask;
+
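+ /* shadow_wqe holds one bounce buffer of max_wqe_size bytes per WQ
+ * page for WQEs that wrap a page boundary; shadow_idx records the
+ * prod_idx each shadow buffer was handed out for.
+ */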
+ u8 *shadow_wqe;
+ u16 *shadow_idx;
+};
+
+struct hinic_cmdq_pages {
+ /* The addresses are 64 bit in the HW */
+ u64 cmdq_page_paddr;
+ u64 *cmdq_page_vaddr;
+ u64 *cmdq_shadow_page_vaddr;
+
+ void *dev_hdl;
+};
+
+struct hinic_wqs {
+ /* The addresses are 64 bit in the HW */
+ u64 *page_paddr;
+ u64 **page_vaddr;
+ u64 **shadow_page_vaddr;
+
+ struct hinic_free_block *free_blocks;
+ u32 alloc_blk_pos;
+ u32 return_blk_pos;
+ int num_free_blks;
+
+ /* for allocate blocks */
+ spinlock_t alloc_blocks_lock;
+
+ u32 num_pages;
+
+ void *dev_hdl;
+};
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);
+
+int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size);
+
+void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks);
+
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl);
+
+void hinic_wqs_free(struct hinic_wqs *wqs);
+
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size);
+
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
+
+void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index);
+
+u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq);
+
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);
+
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs);
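+
+/* Typical producer-side flow (illustrative sketch only, not taken from
+ * the driver; error handling trimmed):
+ *
+ * wqe = hinic_get_wqe(wq, num_wqebbs, &prod_idx);
+ * if (!wqe)
+ * return -EBUSY; (ring is full)
+ * ... fill in the wqe ...
+ * hinic_write_wqe(wq, wqe, num_wqebbs); (flushes a shadow wqe)
+ */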
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl.h b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
new file mode 100644
index 000000000000..0f017a05e55e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef OSSL_KNL_H
+#define OSSL_KNL_H
+
+#include "ossl_knl_linux.h"
+
+#if defined(__WIN__) || defined(__VMWARE__)
+#define __WIN_OR_VMWARE__
+#endif
+
+#if (defined(__WIN__) || defined(__VMWARE__)) && !defined(__HIFC__)
+#define __WIN_OR_VMWARE_AND_NONHIFC__
+#endif
+
+#define sdk_err(dev, format, ...) \
+ dev_err(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_warn(dev, format, ...) \
+ dev_warn(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_notice(dev, format, ...) \
+ dev_notice(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_info(dev, format, ...) \
+ dev_info(dev, "[COMM]" format, ##__VA_ARGS__)
+
+#define nic_err(dev, format, ...) \
+ dev_err(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_warn(dev, format, ...) \
+ dev_warn(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_notice(dev, format, ...) \
+ dev_notice(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_info(dev, format, ...) \
+ dev_info(dev, "[NIC]" format, ##__VA_ARGS__)
+
+#endif /* OSSL_KNL_H */
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c
new file mode 100644
index 000000000000..4475206f0d95
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#include <linux/pci_regs.h>
+
+#include "ossl_knl_linux.h"
+
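+/* sys_tz.tz_minuteswest is expressed in minutes; scale it to seconds */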
+#define OSSL_MINUTE_BASE (60)
+
+sdk_file *file_creat(char *file_name)
+{
+ return filp_open(file_name, O_CREAT | O_RDWR | O_APPEND, 0);
+}
+
+sdk_file *file_open(char *file_name)
+{
+ return filp_open(file_name, O_RDONLY, 0);
+}
+
+void file_close(sdk_file *file_handle)
+{
+ (void)filp_close(file_handle, NULL);
+}
+
+u32 get_file_size(sdk_file *file_handle)
+{
+ struct inode *file_inode;
+
+ file_inode = file_handle->f_inode;
+
+ return (u32)(file_inode->i_size);
+}
+
+void set_file_position(sdk_file *file_handle, u32 position)
+{
+ file_handle->f_pos = position;
+}
+
+int file_read(sdk_file *file_handle, char *log_buffer,
+ u32 rd_length, u32 *file_pos)
+{
+ /* kernel_read() handles kernel-space buffers and files that only
+ * implement ->read_iter; calling f_op->read directly oopses when
+ * ->read is NULL
+ */
+ return (int)kernel_read(file_handle, log_buffer, rd_length,
+ &file_handle->f_pos);
+}
+
+u32 file_write(sdk_file *file_handle, char *log_buffer, u32 wr_length)
+{
+ return (u32)kernel_write(file_handle, log_buffer, wr_length,
+ &file_handle->f_pos);
+}
+
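+/* Thread body: repeatedly invoke the user callback until kthread_stop()
+ * is called. The callback is expected to sleep or block internally,
+ * otherwise this loop busy-spins.
+ */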
+static int _linux_thread_func(void *thread)
+{
+ struct sdk_thread_info *info = (struct sdk_thread_info *)thread;
+
+ while (!kthread_should_stop())
+ info->thread_fn(info->data);
+
+ return 0;
+}
+
+int creat_thread(struct sdk_thread_info *thread_info)
+{
+ thread_info->thread_obj = kthread_run(_linux_thread_func,
+ thread_info, thread_info->name);
+ if (IS_ERR(thread_info->thread_obj)) {
+ /* kthread_run() returns an ERR_PTR on failure, never NULL */
+ thread_info->thread_obj = NULL;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void stop_thread(struct sdk_thread_info *thread_info)
+{
+ if (thread_info->thread_obj)
+ (void)kthread_stop(thread_info->thread_obj);
+}
+
+void utctime_to_localtime(u64 utctime, u64 *localtime)
+{
+ *localtime = utctime - sys_tz.tz_minuteswest * OSSL_MINUTE_BASE;
+}
+
+#ifndef HAVE_TIMER_SETUP
+void initialize_timer(void *adapter_hdl, struct timer_list *timer)
+{
+ if (!adapter_hdl || !timer)
+ return;
+
+ init_timer(timer);
+}
+#endif
+
+void add_to_timer(struct timer_list *timer, long period)
+{
+ if (!timer)
+ return;
+
+ /* 'period' is unused: the caller must set timer->expires itself */
+ add_timer(timer);
+}
+
+void stop_timer(struct timer_list *timer)
+{
+ /* no-op stub; delete_timer() performs the actual teardown */
+}
+
+void delete_timer(struct timer_list *timer)
+{
+ if (!timer)
+ return;
+
+ del_timer_sync(timer);
+}
+
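+/* Minimal atoi: accumulates leading decimal digits and stops at the
+ * first non-digit. No sign, whitespace or overflow handling.
+ */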
+int local_atoi(const char *name)
+{
+ int val = 0;
+
+ for (;; name++) {
+ switch (*name) {
+ case '0' ... '9':
+ val = 10 * val + (*name - '0');
+ break;
+ default:
+ return val;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
new file mode 100644
index 000000000000..a7c676a599ea
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef OSSL_KNL_LINUX_H_
+#define OSSL_KNL_LINUX_H_
+
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/fs.h>
+#include <linux/kthread.h>
+#include <net/checksum.h>
+#include <net/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/udp.h>
+#include <linux/highmem.h>
+
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#include <generated/utsrelease.h>
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef SUPPORTED_100000BASEKR4_Full
+#define SUPPORTED_100000BASEKR4_Full 0
+#define ADVERTISED_100000BASEKR4_Full 0
+#endif
+#ifndef SUPPORTED_100000BASECR4_Full
+#define SUPPORTED_100000BASECR4_Full 0
+#define ADVERTISED_100000BASECR4_Full 0
+#endif
+
+#ifndef SUPPORTED_40000BASEKR4_Full
+#define SUPPORTED_40000BASEKR4_Full 0
+#define ADVERTISED_40000BASEKR4_Full 0
+#endif
+#ifndef SUPPORTED_40000BASECR4_Full
+#define SUPPORTED_40000BASECR4_Full 0
+#define ADVERTISED_40000BASECR4_Full 0
+#endif
+
+#ifndef SUPPORTED_25000BASEKR_Full
+#define SUPPORTED_25000BASEKR_Full 0
+#define ADVERTISED_25000BASEKR_Full 0
+#endif
+#ifndef SUPPORTED_25000BASECR_Full
+#define SUPPORTED_25000BASECR_Full 0
+#define ADVERTISED_25000BASECR_Full 0
+#endif
+
+#ifndef ETHTOOL_GLINKSETTINGS
+enum ethtool_link_mode_bit_indices {
+ ETHTOOL_LINK_MODE_1000BASEKX_Full_BIT = 17,
+ ETHTOOL_LINK_MODE_10000BASEKR_Full_BIT = 19,
+ ETHTOOL_LINK_MODE_40000BASEKR4_Full_BIT = 23,
+ ETHTOOL_LINK_MODE_40000BASECR4_Full_BIT = 24,
+ ETHTOOL_LINK_MODE_25000BASECR_Full_BIT = 31,
+ ETHTOOL_LINK_MODE_25000BASEKR_Full_BIT = 32,
+ ETHTOOL_LINK_MODE_100000BASEKR4_Full_BIT = 36,
+ ETHTOOL_LINK_MODE_100000BASECR4_Full_BIT = 38,
+};
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5. */
+#define RHEL_RELEASE_CODE 0
+#endif
+
+/* RHEL 7 didn't backport the parameter change in
+ * create_singlethread_workqueue.
+ * If/when RH corrects this we will want to tighten up the version check.
+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0))
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name) \
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+#endif
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
+ * the linux-source package, but in the linux-headers package. It begins to
+ * appear in later releases of 14.04 and 14.10.
+ *
+ * Ex:
+ * <Ubuntu 14.04.1>
+ * $uname -r
+ * 3.13.0-45-generic
+ * ABI is 45
+ *
+ * <Ubuntu 14.10>
+ * $uname -r
+ * 3.16.0-23-generic
+ * ABI is 23.
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored. This is
+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, 0) << 8) + (d))
+
+/* SuSE version macros are the same as Linux kernel version macro. */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
+#endif
+#define SLE_LOCALVERSION(a, b, c) KERNEL_VERSION(a, b, c)
+#ifdef CONFIG_SUSE_KERNEL
+#if (KERNEL_VERSION(92, 0, 0) <= SLE_LOCALVERSION_CODE && \
+ KERNEL_VERSION(93, 0, 0) > SLE_LOCALVERSION_CODE)
+/* SLES12 SP2 GA is 4.4.21-69.
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
+ * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
+ * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y
+ */
+#define SLE_VERSION_CODE SLE_VERSION(12, 2, 0)
+#else
+/* SLES15 Beta1 is 4.12.14-2.
+ * SLES12 SP4 will also use 4.12.14-nn.xx.y
+ */
+#define SLE_VERSION_CODE SLE_VERSION(15, 0, 0)
+/* new SLES kernels must be added here with >= checks based on the
+ * kernel version; order the checks from newest to oldest so each
+ * release is caught by its >= comparison
+ */
+#endif /* SLE_LOCALVERSION_CODE check */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+#ifndef SLE_LOCALVERSION_CODE
+#define SLE_LOCALVERSION_CODE 0
+#endif /* SLE_LOCALVERSION_CODE */
+
+#ifndef ALIGN_DOWN
+#ifndef __ALIGN_KERNEL
+#define __ALIGN_KERNEL(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
+#endif
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+#endif
+
+/*****************************************************************************/
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+
+/*****************************************************************************/
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_VERSION(6, 2) <= RHEL_RELEASE_CODE) && \
+ (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_VERSION(6, 6) <= RHEL_RELEASE_CODE) && \
+ (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+#define HAVE_INET6_IFADDR_LIST
+
+/*****************************************************************************/
+
+#define HAVE_NDO_GET_STATS64
+
+/*****************************************************************************/
+
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#define HAVE_IRQ_AFFINITY_NOTIFY
+
+/*****************************************************************************/
+#define HAVE_ETHTOOL_SET_PHYS_ID
+
+/*****************************************************************************/
+#define HAVE_NETDEV_WANTED_FEAUTES
+
+/*****************************************************************************/
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+
+/*****************************************************************************/
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+
+/*****************************************************************************/
+#include <linux/kconfig.h>
+
+#define _kc_kmap_atomic(page) kmap_atomic(page)
+#define _kc_kunmap_atomic(addr) kunmap_atomic(addr)
+
+/*****************************************************************************/
+#include <linux/of_net.h>
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+#define HAVE_NAPI_GRO_FLUSH_OLD
+
+/*****************************************************************************/
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+/*****************************************************************************/
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define HAVE_SKB_INNER_NETWORK_HEADER
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_VERSION(7, 0) <= RHEL_RELEASE_CODE) && \
+ (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_VERSION(7, 2) <= RHEL_RELEASE_CODE)
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_VERSION(7, 3) <= RHEL_RELEASE_CODE)
+#define HAVE_GENEVE_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#ifdef ETHTOOL_GLINKSETTINGS
+/* take care when using this macro on the Pangea platform */
+#define HAVE_ETHTOOL_25G_BITS
+#endif /* ETHTOOL_GLINKSETTINGS */
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_VERSION(7, 4) <= RHEL_RELEASE_CODE)
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+
+#if (RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE)
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#endif /* RHEL > 7.5 */
+
+#endif /* RHEL >= 7.0 && RHEL < 8.0 */
+
+/*****************************************************************************/
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+
+/*****************************************************************************/
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+
+/*****************************************************************************/
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+
+/*****************************************************************************/
+
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+
+/*****************************************************************************/
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+
+/*****************************************************************************/
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_MULTI_VLAN_OFFLOAD_EN
+
+/*****************************************************************************/
+#define HAVE_RXFH_HASHFUNC
+
+/*****************************************************************************/
+
+/****************************************************************/
+
+/****************************************************************/
+
+/****************************************************************/
+
+/****************************************************************/
+
+/****************************************************************/
+
+#define HAVE_IO_MAP_WC_SIZE
+
+/*****************************************************************************/
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+
+/*****************************************************************************/
+#define HAVE_VOID_NDO_GET_STATS64
+#define HAVE_VM_OPS_FAULT_NO_VMA
+
+/*****************************************************************************/
+#define HAVE_HWTSTAMP_FILTER_NTP_ALL
+#define HAVE_NDO_SETUP_TC_CHAIN_INDEX
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+#define HAVE_PTP_CLOCK_DO_AUX_WORK
+
+/*****************************************************************************/
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+/*****************************************************************************/
+
+/*****************************************************************************/
+#define HAVE_TIMER_SETUP
+/*****************************************************************************/
+
+/*****************************************************************************/
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+/*****************************************************************************/
+
+/* on these kernels the vxlan outer udp checksum is offloaded and
+ * skb->inner_transport_header is wrong
+ */
+#if (SLE_VERSION_CODE && ((SLE_VERSION(12, 1, 0) == SLE_VERSION_CODE) || \
+ (SLE_VERSION(12, 0, 0) == SLE_VERSION_CODE))) || \
+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) == RHEL_RELEASE_CODE))
+#define HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+#endif
+
+#define HAVE_ENCAPSULATION_TSO
+
+#define HAVE_ENCAPSULATION_CSUM
+
+#ifndef eth_zero_addr
+static inline void __kc_eth_zero_addr(u8 *addr)
+{
+ memset(addr, 0x00, ETH_ALEN);
+}
+
+#define eth_zero_addr(_addr) __kc_eth_zero_addr(_addr)
+#endif
+
+#ifndef netdev_hw_addr_list_for_each
+#define netdev_hw_addr_list_for_each(ha, l) \
+ list_for_each_entry(ha, &(l)->list, list)
+#endif
+
+#define spin_lock_deinit(lock)
+
+typedef struct file sdk_file;
+
+sdk_file *file_creat(char *file_name);
+
+sdk_file *file_open(char *file_name);
+
+void file_close(sdk_file *file_handle);
+
+u32 get_file_size(sdk_file *file_handle);
+
+void set_file_position(sdk_file *file_handle, u32 position);
+
+int file_read(sdk_file *file_handle, char *log_buffer,
+ u32 rd_length, u32 *file_pos);
+
+u32 file_write(sdk_file *file_handle, char *log_buffer, u32 wr_length);
+
+struct sdk_thread_info {
+ struct task_struct *thread_obj;
+ char *name;
+ void (*thread_fn)(void *x);
+ void *thread_event;
+ void *data;
+};
+
+int creat_thread(struct sdk_thread_info *thread_info);
+
+void stop_thread(struct sdk_thread_info *thread_info);
+
+#define destroy_work(work)
+void utctime_to_localtime(u64 utctime, u64 *localtime);
+#ifndef HAVE_TIMER_SETUP
+void initialize_timer(void *adapter_hdl, struct timer_list *timer);
+#endif
+void add_to_timer(struct timer_list *timer, long period);
+void stop_timer(struct timer_list *timer);
+void delete_timer(struct timer_list *timer);
+
+int local_atoi(const char *name);
+
+#define nicif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args)
+
+#define destroy_completion(completion)
+#define sema_deinit(lock)
+#define mutex_deinit(lock)
+#define rwlock_deinit(lock)
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_types.h b/drivers/net/ethernet/huawei/hinic/ossl_types.h
new file mode 100644
index 000000000000..b8591541bb7c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/ossl_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * Statement:
+ * "ossl_knl.h" or "ossl_user.h" must be included before "ossl_types.h".
+ */
+
+#ifndef _OSSL_TYPES_H
+#define _OSSL_TYPES_H
+
+#undef NULL
+#if defined(__cplusplus)
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+
+#define uda_handle void *
+
+#define UDA_TRUE 1
+#define UDA_FALSE 0
+
+#ifndef UINT8_MAX
+#define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */
+#define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */
+#define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */
+#define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */
+#define ASCII_MAX (0x7F)
+#endif
+
+#endif /* OSSL_TYPES_H */
--
2.31.0