From: Xue <xuechaojing@huawei.com>
commit 6cb2e756917d122560e0c52f350682760d004eec openEuler-1.0
driver inclusion
category: feature
bugzilla: 4472
CVE: NA
------------------------------------------------------------------
Add NIC layer support, including:
1. register the net device with the kernel
2. implement all the hooks of 'struct net_device_ops'
3. etc.
Reviewed-by: Chiqijun <chiqijun@huawei.com>
Signed-off-by: Xue <xuechaojing@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Xin Hao <haoxing990@gmail.com>
---
drivers/net/ethernet/huawei/hinic/Makefile | 4 +-
.../ethernet/huawei/hinic/hinic_dbgtool_knl.c | 845 ++++
.../ethernet/huawei/hinic/hinic_dbgtool_knl.h | 121 +
drivers/net/ethernet/huawei/hinic/hinic_dcb.c | 1253 ++++++
drivers/net/ethernet/huawei/hinic/hinic_dcb.h | 46 +
drivers/net/ethernet/huawei/hinic/hinic_lld.c | 2 +-
.../net/ethernet/huawei/hinic/hinic_main.c | 3497 +++++++++++++----
.../net/ethernet/huawei/hinic/hinic_nic_dev.h | 289 ++
.../net/ethernet/huawei/hinic/hinic_nictool.c | 2084 ++++++++++
.../net/ethernet/huawei/hinic/hinic_nictool.h | 239 ++
.../ethernet/huawei/hinic/hinic_pci_id_tbl.h | 39 +
drivers/net/ethernet/huawei/hinic/hinic_qp.c | 222 ++
drivers/net/ethernet/huawei/hinic/hinic_qp.h | 152 +
drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1402 +++++--
drivers/net/ethernet/huawei/hinic/hinic_rx.h | 116 +-
.../net/ethernet/huawei/hinic/hinic_sriov.c | 459 +++
.../net/ethernet/huawei/hinic/hinic_sriov.h | 66 +
drivers/net/ethernet/huawei/hinic/hinic_tx.c | 1470 +++++--
drivers/net/ethernet/huawei/hinic/hinic_tx.h | 110 +-
19 files changed, 10914 insertions(+), 1502 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dcb.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dcb.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nictool.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nictool.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qp.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qp.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sriov.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sriov.h
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 09569d05c196..66df980fb585 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -6,4 +6,6 @@ hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \
hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \
ossl_knl_linux.o \
hinic_sml_counter.o hinic_sml_lt.o \
- hinic_multi_host_mgmt.o
+ hinic_multi_host_mgmt.o hinic_main.o hinic_lld.o \
+ hinic_qp.o hinic_rx.o hinic_tx.o hinic_dbgtool_knl.o \
+ hinic_nictool.o hinic_sriov.o hinic_dcb.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
new file mode 100644
index 000000000000..805a101f6a30
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/if.h>
+#include <linux/ioctl.h>
+#include <linux/pci.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dbgtool_knl.h"
+
+struct ffm_intr_info {
+ u8 node_id;
+ /* error level of the interrupt source */
+ u8 err_level;
+ /* Classification by interrupt source properties */
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+};
+
+#define HINIC_SELF_CMD_UP2PF_FFM 0x26
+
+void *g_card_node_array[MAX_CARD_NUM] = {0};
+void *g_card_vir_addr[MAX_CARD_NUM] = {0};
+u64 g_card_phy_addr[MAX_CARD_NUM] = {0};
+int card_id;
+
+/* dbgtool character device name, class name, dev path */
+#define CHR_DEV_DBGTOOL "dbgtool_chr_dev"
+#define CLASS_DBGTOOL "dbgtool_class"
+#define DBGTOOL_DEV_PATH "/dev/dbgtool_chr_dev"
+
+struct dbgtool_k_glb_info {
+ struct semaphore dbgtool_sem;
+ struct ffm_record_info *ffm;
+};
+
+dev_t dbgtool_dev_id; /* device id */
+struct cdev dbgtool_chr_dev; /* struct of char device */
+
+/*lint -save -e104 -e808*/
+struct class *dbgtool_d_class; /* struct of char class */
+/*lint -restore*/
+
+int g_dbgtool_init_flag;
+int g_dbgtool_ref_cnt;
+
+static int dbgtool_knl_open(struct inode *pnode,
+ struct file *pfile)
+{
+ return 0;
+}
+
+static int dbgtool_knl_release(struct inode *pnode,
+ struct file *pfile)
+{
+ return 0;
+}
+
+static ssize_t dbgtool_knl_read(struct file *pfile,
+ char __user *ubuf,
+ size_t size,
+ loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t dbgtool_knl_write(struct file *pfile,
+ const char __user *ubuf,
+ size_t size,
+ loff_t *ppos)
+{
+ return 0;
+}
+
+int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vmsize = vma->vm_end - vma->vm_start;
+ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ phys_addr_t phy_addr;
+
+ if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) {
+ pr_err("Map size = %lu is bigger than alloc\n", vmsize);
+ return -EAGAIN;
+ }
+
+ /* old versions of the tool set vma->vm_pgoff to 0 */
+ phy_addr = offset ? offset : g_card_phy_addr[card_id];
+
+ if (!phy_addr) {
+ pr_err("Card_id = %d physical address is 0\n", card_id);
+ return -EAGAIN;
+ }
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (phy_addr >> PAGE_SHIFT),
+ vmsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_api_cmd_read - used for read operations
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ **/
+long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret;
+ u8 *cmd;
+ u16 size;
+ void *ack;
+ u16 ack_size;
+ u32 pf_id;
+ void *hwdev;
+
+ pf_id = para->param.api_rd.pf_id;
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* obtain the chipif pointer for this pf_id */
+ hwdev = g_func_handle_array[pf_id];
+ if (!hwdev) {
+ pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* alloc cmd and ack memory */
+ size = para->param.api_rd.size;
+ cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
+ if (!cmd) {
+ pr_err("Alloc read cmd mem fail\n");
+ return -ENOMEM;
+ }
+
+ ack_size = para->param.api_rd.ack_size;
+ ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL);
+ if (!ack) {
+ pr_err("Alloc read ack mem fail\n");
+ ret = -ENOMEM;
+ goto alloc_ack_mem_fail;
+ }
+
+ /* copy cmd content from user mode */
+ if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) {
+ pr_err("Copy cmd from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_cmd_fail;
+ }
+ /* invoke the api cmd interface to read the content */
+ ret = hinic_api_cmd_read_ack(hwdev, para->param.api_rd.dest,
+ cmd, size, ack, ack_size);
+ if (ret) {
+ pr_err("Api send single cmd ack fail!\n");
+ goto api_rd_fail;
+ }
+
+ /* copy the ack contents back to user space */
+ if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) {
+ pr_err("Copy ack to user fail\n");
+ ret = -EFAULT;
+ }
+api_rd_fail:
+copy_user_cmd_fail:
+ kfree(ack);
+alloc_ack_mem_fail:
+ kfree(cmd);
+ return ret;
+}
+
+/**
+ * dbgtool_knl_api_cmd_write - used for write operations
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ **/
+long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret;
+ u8 *cmd;
+ u16 size;
+ u32 pf_id;
+ void *hwdev;
+
+ pf_id = para->param.api_wr.pf_id;
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* obtaining chipif pointer according to pf_id */
+ hwdev = g_func_handle_array[pf_id];
+ if (!hwdev) {
+ pr_err("PF id(0x%x) handle null\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* alloc cmd memory*/
+ size = para->param.api_wr.size;
+ cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
+ if (!cmd) {
+ pr_err("Alloc write cmd mem fail\n");
+ return -ENOMEM;
+ }
+
+ /* copy cmd content from user mode */
+ if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) {
+ pr_err("Copy cmd from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_cmd_fail;
+ }
+
+ /* invoke the api cmd interface to write the content */
+ ret = hinic_api_cmd_write_nack(hwdev, para->param.api_wr.dest,
+ cmd, size);
+ if (ret)
+ pr_err("Api send single cmd nack fail\n");
+
+copy_user_cmd_fail:
+ kfree(cmd);
+ return ret;
+}
+
+void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx,
+ void **g_func_handle_array)
+{
+ u32 func_idx;
+ struct hinic_hwdev *hwdev;
+
+ if (!dev_info) {
+ pr_err("Params error! dev_info:%p\n", dev_info);
+ return;
+ }
+
+ /* at most 16 PFs */
+ for (func_idx = 0; func_idx < 16; func_idx++) {
+ hwdev = (struct hinic_hwdev *)g_func_handle_array[func_idx];
+
+ dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx];
+
+ if (!hwdev) {
+ dev_info[func_idx].bar0_size = 0;
+ dev_info[func_idx].bus = 0;
+ dev_info[func_idx].slot = 0;
+ dev_info[func_idx].func = 0;
+ } else {
+ dev_info[func_idx].bar0_size =
+ pci_resource_len
+ (((struct pci_dev *)hwdev->pcidev_hdl), 0);
+ dev_info[func_idx].bus =
+ ((struct pci_dev *)
+ hwdev->pcidev_hdl)->bus->number;
+ dev_info[func_idx].slot =
+ PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl)
+ ->devfn);
+ dev_info[func_idx].func =
+ PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl)
+ ->devfn);
+ }
+ }
+}
+
+/**
+ * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ **/
+long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ struct pf_dev_info dev_info[16] = { {0} };
+ unsigned char *tmp;
+ int i;
+
+ if (!g_card_vir_addr[card_id]) {
+ g_card_vir_addr[card_id] =
+ (void *)__get_free_pages(GFP_KERNEL,
+ DBGTOOL_PAGE_ORDER);
+ if (!g_card_vir_addr[card_id]) {
+ pr_err("Alloc dbgtool api chain fail!\n");
+ return -EFAULT;
+ }
+
+ memset(g_card_vir_addr[card_id], 0,
+ PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
+
+ g_card_phy_addr[card_id] =
+ virt_to_phys(g_card_vir_addr[card_id]);
+ if (!g_card_phy_addr[card_id]) {
+ pr_err("phy addr for card %d is 0, vir_addr: 0x%p\n",
+ card_id, g_card_vir_addr[card_id]);
+ free_pages((unsigned long)g_card_vir_addr[card_id],
+ DBGTOOL_PAGE_ORDER);
+ g_card_vir_addr[card_id] = NULL;
+ return -EFAULT;
+ }
+
+ tmp = g_card_vir_addr[card_id];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ SetPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+ }
+
+ chipif_get_all_pf_dev_info(dev_info, card_id, g_func_handle_array);
+
+ /* copy the dev_info to user mode */
+ if (copy_to_user(para->param.dev_info, dev_info,
+ (unsigned int)sizeof(dev_info))) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_ffm_info_rd - Read ffm information
+ * @para: the dbgtool parameter
+ * @dbgtool_info: the dbgtool info
+ **/
+long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para,
+ struct dbgtool_k_glb_info *dbgtool_info)
+{
+ /* copy the ffm_info to user mode */
+ if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm,
+ (unsigned int)sizeof(struct ffm_record_info))) {
+ pr_err("Copy ffm_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_ffm_info_clr - Clear FFM information
+ * @para: the dbgtool parameter
+ * @dbgtool_info: the dbgtool info
+ **/
+long dbgtool_knl_ffm_info_clr(struct dbgtool_param *para,
+ struct dbgtool_k_glb_info *dbgtool_info)
+{
+ dbgtool_info->ffm->ffm_num = 0;
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_msg_to_up - send a message to the uP for a dbgtool command
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ **/
+long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret;
+ void *buf_in;
+ void *buf_out;
+ u16 out_size;
+ u8 pf_id;
+
+ pf_id = para->param.msg2up.pf_id;
+ /* at most 16 PFs */
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id);
+ return -EFAULT;
+ }
+
+ if (!g_func_handle_array[pf_id]) {
+ pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id);
+ return -EFAULT;
+ }
+
+#define DBGTOOL_MSG_MAX_SIZE 2048ULL
+ /* alloc 2KB buf_in and buf_out memory */
+ buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!buf_in) {
+ pr_err("Alloc buf_in mem fail\n");
+ return -ENOMEM;
+ }
+
+ buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!buf_out) {
+ pr_err("Alloc buf_out mem fail\n");
+ ret = -ENOMEM;
+ goto alloc_buf_out_mem_fail;
+ }
+
+ /* copy buf_in from the user state*/
+ if (copy_from_user(buf_in, para->param.msg2up.buf_in,
+ (unsigned long)para->param.msg2up.in_size)) {
+ pr_err("Copy buf_in from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_buf_in_fail;
+ }
+
+ out_size = DBGTOOL_MSG_MAX_SIZE;
+ /* Invoke the pf2up communication interface*/
+ ret = hinic_msg_to_mgmt_sync(g_func_handle_array[pf_id],
+ para->param.msg2up.mod,
+ para->param.msg2up.cmd,
+ buf_in,
+ para->param.msg2up.in_size,
+ buf_out,
+ &out_size,
+ 0);
+ /* the 50s timeout is sufficient */
+
+ if (ret)
+ goto msg_2_up_fail;
+
+ /* Copy the out_size and buf_out content to user mode*/
+ if (copy_to_user(para->param.msg2up.out_size, &out_size,
+ (unsigned int)sizeof(out_size))) {
+ pr_err("Copy out_size to user fail\n");
+ ret = -EFAULT;
+ goto copy_out_size_fail;
+ }
+
+ if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) {
+ pr_err("Copy buf_out to user fail\n");
+ ret = -EFAULT;
+ }
+
+copy_out_size_fail:
+msg_2_up_fail:
+copy_user_buf_in_fail:
+ kfree(buf_out);
+alloc_buf_out_mem_fail:
+ kfree(buf_in);
+ return ret;
+}
+
+long dbgtool_knl_free_mem(int id)
+{
+ unsigned char *tmp;
+ int i;
+
+ if (!g_card_vir_addr[id])
+ return 0;
+
+ tmp = g_card_vir_addr[id];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ ClearPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+
+ free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER);
+ g_card_vir_addr[id] = NULL;
+ g_card_phy_addr[id] = 0;
+
+ return 0;
+}
+
+/*lint -save -e771 -e794*/
+
+/**
+ * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry
+ * @pfile: the pointer to file
+ * @cmd: the command type
+ * @arg: user-space pointer to the dbgtool parameter
+ **/
+long dbgtool_knl_unlocked_ioctl(struct file *pfile,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ long ret;
+ unsigned int real_cmd;
+ struct dbgtool_param param;
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct card_node *card_info = NULL;
+ int i;
+
+ (void)memset(&param, 0, sizeof(param));
+
+ if (copy_from_user(&param, (void *)arg, sizeof(param))) {
+ pr_err("Copy param from user fail\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", param.chip_name);
+ return -EFAULT;
+ }
+ card_id = i;
+ dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
+
+ down(&dbgtool_info->dbgtool_sem);
+
+ real_cmd = _IOC_NR(cmd);
+
+ switch (real_cmd) {
+ case DBGTOOL_CMD_API_RD:
+ ret = dbgtool_knl_api_cmd_read(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_API_WR:
+ ret = dbgtool_knl_api_cmd_write(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_FFM_RD:
+ ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info);
+ break;
+ case DBGTOOL_CMD_FFM_CLR:
+ ret = dbgtool_knl_ffm_info_clr(&param, dbgtool_info);
+ break;
+ case DBGTOOL_CMD_PF_DEV_INFO_GET:
+ ret = dbgtool_knl_pf_dev_info_get(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_MSG_2_UP:
+ ret = dbgtool_knl_msg_to_up(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_FREE_MEM:
+ ret = dbgtool_knl_free_mem(i);
+ break;
+ default:
+ pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd);
+ ret = -EFAULT;
+ }
+
+ up(&dbgtool_info->dbgtool_sem);
+ return ret;
+}
+
+/**
+ * ffm_intr_msg_record - record an FFM interrupt reported by the uP
+ * @handle: the function handle
+ * @buf_in: the pointer to the input buffer
+ * @in_size: size of the input buffer
+ * @buf_out: the pointer to the output buffer
+ * @out_size: size of the output buffer
+ **/
+void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct ffm_intr_info *intr;
+ u32 ffm_idx;
+ struct timex txc;
+ struct rtc_time rctm;
+ struct card_node *card_info = NULL;
+ int i, j;
+
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_card_node_array[i];
+ if (!card_info)
+ continue;
+
+ for (j = 0; j < MAX_FUNCTION_NUM; j++) {
+ if (handle == card_info->func_handle_array[j])
+ break;
+ }
+
+ /* only stop at the card that owns this handle */
+ if (j < MAX_FUNCTION_NUM)
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Id(%d) cant find this card\n", i);
+ return;
+ }
+
+ dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
+ if (!dbgtool_info) {
+ pr_err("Dbgtool info is null\n");
+ return;
+ }
+
+ intr = (struct ffm_intr_info *)buf_in;
+
+ if (!dbgtool_info->ffm)
+ return;
+
+ ffm_idx = dbgtool_info->ffm->ffm_num;
+ if (ffm_idx < FFM_RECORD_NUM_MAX) {
+ pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx);
+
+ dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id;
+ dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level;
+ dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type;
+ dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr =
+ intr->err_csr_addr;
+ dbgtool_info->ffm->ffm[ffm_idx].err_csr_value =
+ intr->err_csr_value;
+
+ /* obtain the current UTC time */
+ do_gettimeofday(&txc.time);
+
+ /* convert to local time (UTC+8) and break down into tm */
+ rtc_time_to_tm((unsigned long)txc.time.tv_sec +
+ 60 * 60 * 8, &rctm);
+
+ /* tm_year starts from 1900; 0->1900, 1->1901, and so on */
+ dbgtool_info->ffm->ffm[ffm_idx].year =
+ (u16)(rctm.tm_year + 1900);
+ /* tm_mon starts from 0, 0 indicates January, and so on */
+ dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1;
+ dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday;
+ dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour;
+ dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min;
+ dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec;
+
+ dbgtool_info->ffm->ffm_num++;
+ }
+}
+
+/*lint -restore*/
+
+/*lint -save -e785 -e438*/
+static const struct file_operations dbgtool_file_operations = {
+ .owner = THIS_MODULE,
+ .open = dbgtool_knl_open,
+ .release = dbgtool_knl_release,
+ .read = dbgtool_knl_read,
+ .write = dbgtool_knl_write,
+ .unlocked_ioctl = dbgtool_knl_unlocked_ioctl,
+ .mmap = hinic_mem_mmap,
+};
+
+/**
+ * dbgtool_knl_init - dbgtool character device init
+ * @vhwdev: the pointer to hardware device
+ * @chip_node: the pointer to card node
+ **/
+int dbgtool_knl_init(void *vhwdev, void *chip_node)
+{
+ int ret = 0;
+ int id = 0;
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct device *pdevice;
+ struct card_node *chip_info = (struct card_node *)chip_node;
+ struct hinic_hwdev *hwdev = vhwdev;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+ if (ret) {
+ pr_err("Failed to sysfs create file\n");
+ return ret;
+ }
+
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = hwdev;
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev, HINIC_SELF_CMD_UP2PF_FFM,
+ ffm_intr_msg_record);
+
+ if (chip_info->dbgtool_info) {
+ chip_info->func_num++;
+ return 0;
+ }
+
+ dbgtool_info = kzalloc(sizeof(*dbgtool_info), GFP_KERNEL);
+ if (!dbgtool_info) {
+ pr_err("Failed to allocate dbgtool_info\n");
+ ret = -ENOMEM;
+ goto dbgtool_info_fail;
+ }
+ chip_info->dbgtool_info = dbgtool_info;
+
+ /* FFM init */
+ dbgtool_info->ffm = kzalloc(sizeof(*dbgtool_info->ffm), GFP_KERNEL);
+ if (!dbgtool_info->ffm) {
+ pr_err("Failed to allocate ffm record info\n");
+ ret = -ENOMEM;
+ goto dbgtool_info_ffm_fail;
+ }
+
+ sema_init(&dbgtool_info->dbgtool_sem, 1);
+
+ ret = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
+ if (ret <= 0)
+ pr_err("Failed to get hinic id\n");
+
+ g_card_node_array[id] = chip_info;
+ chip_info->func_num++;
+
+ if (g_dbgtool_init_flag) {
+ g_dbgtool_ref_cnt++;
+ /* already initialized */
+ return 0;
+ }
+
+ /* alloc device id */
+ ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL);
+ if (ret) {
+ pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret);
+ goto alloc_chdev_fail;
+ }
+
+ /* init device */
+ cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations);
+
+ /* add device */
+ ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1);
+ if (ret) {
+ pr_err("Add dgbtool dev fail, ret=0x%x\n", ret);
+ goto cdev_add_fail;
+ }
+
+ /*lint -save -e160*/
+ dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL);
+ /*lint -restore*/
+ if (IS_ERR(dbgtool_d_class)) {
+ pr_err("Create dgbtool class fail\n");
+ ret = -EFAULT;
+ goto cls_create_fail;
+ }
+
+ /* Export device information to user space
+ * (/sys/class/class name/device name)
+ */
+ pdevice = device_create(dbgtool_d_class, NULL,
+ dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL);
+ if (IS_ERR(pdevice)) {
+ pr_err("Create dgbtool device fail\n");
+ ret = -EFAULT;
+ goto dev_create_fail;
+ }
+ g_dbgtool_init_flag = 1;
+ g_dbgtool_ref_cnt = 1;
+
+ return 0;
+
+dev_create_fail:
+ class_destroy(dbgtool_d_class);
+cls_create_fail:
+ cdev_del(&(dbgtool_chr_dev));
+cdev_add_fail:
+ unregister_chrdev_region(dbgtool_dev_id, 1);
+alloc_chdev_fail:
+ g_card_node_array[id] = NULL;
+ kfree(dbgtool_info->ffm);
+dbgtool_info_ffm_fail:
+ kfree(dbgtool_info);
+ dbgtool_info = NULL;
+ chip_info->dbgtool_info = NULL;
+dbgtool_info_fail:
+ hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
+ sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+ return ret;
+}
+
+/**
+ * dbgtool_knl_deinit - dbgtool character device deinit
+ * @vhwdev: the pointer to hardware device
+ * @chip_node: the pointer to card node
+ **/
+void dbgtool_knl_deinit(void *vhwdev, void *chip_node)
+{
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct card_node *chip_info = (struct card_node *)chip_node;
+ int id = 0;
+ int err;
+ struct hinic_hwdev *hwdev = vhwdev;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return;
+
+ hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
+
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
+
+ sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+
+ chip_info->func_num--;
+ if (chip_info->func_num)
+ return;
+
+ err = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
+ if (err <= 0)
+ pr_err("Failed to get hinic id\n");
+
+ g_card_node_array[id] = NULL;
+
+ dbgtool_info = chip_info->dbgtool_info;
+ /* FFM deinit */
+ kfree(dbgtool_info->ffm);
+ dbgtool_info->ffm = NULL;
+
+ kfree(dbgtool_info);
+ chip_info->dbgtool_info = NULL;
+
+ (void)dbgtool_knl_free_mem(id);
+
+ if (g_dbgtool_init_flag) {
+ if ((--g_dbgtool_ref_cnt))
+ return;
+ }
+
+ if (!dbgtool_d_class)
+ return;
+
+ device_destroy(dbgtool_d_class, dbgtool_dev_id);
+ class_destroy(dbgtool_d_class);
+ dbgtool_d_class = NULL;
+
+ cdev_del(&(dbgtool_chr_dev));
+ unregister_chrdev_region(dbgtool_dev_id, 1);
+
+ g_dbgtool_init_flag = 0;
+} /*lint -restore*/
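
How user space drives this device (a hypothetical sketch, not part of
this patch): the handler above dispatches on _IOC_NR() only, so any
direction/size encoding built from DBG_TOOL_MAGIC works, and the chip
name "hinic0" is an assumed example:

    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* kernel-style typedefs the exported header expects */
    typedef uint8_t u8;
    typedef uint16_t u16;
    typedef uint32_t u32;
    typedef uint64_t u64;
    #include "hinic_dbgtool_knl.h"

    #define DBGTOOL_IOC(nr) _IOWR(DBG_TOOL_MAGIC, (nr), struct dbgtool_param)

    int main(void)
    {
            struct ffm_record_info ffm = { 0 };
            struct dbgtool_param param = { 0 };
            int fd = open("/dev/dbgtool_chr_dev", O_RDWR);

            if (fd < 0)
                    return 1;

            strncpy(param.chip_name, "hinic0", sizeof(param.chip_name) - 1);
            param.param.ffm_rd = &ffm;

            /* fetch the recorded FFM interrupt entries from the kernel */
            if (!ioctl(fd, DBGTOOL_IOC(DBGTOOL_CMD_FFM_RD), &param))
                    printf("ffm_num: %u\n", ffm.ffm_num);

            close(fd);
            return 0;
    }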
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
new file mode 100644
index 000000000000..833d3fd60d51
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __DBGTOOL_KNL_H__
+#define __DBGTOOL_KNL_H__
+
+#define DBG_TOOL_MAGIC 'w'
+
+/* dbgtool command type
+ * Commands added here allow the dbgtool to invoke any required
+ * kernel-mode driver interface.
+ */
+typedef enum {
+ DBGTOOL_CMD_API_RD = 0,
+ DBGTOOL_CMD_API_WR,
+
+ DBGTOOL_CMD_FFM_RD,
+ DBGTOOL_CMD_FFM_CLR,
+
+ DBGTOOL_CMD_PF_DEV_INFO_GET,
+
+ DBGTOOL_CMD_MSG_2_UP,
+
+ DBGTOOL_CMD_FREE_MEM,
+ DBGTOOL_CMD_NUM
+} dbgtool_cmd;
+
+struct api_cmd_rd {
+ u32 pf_id;
+ u8 dest;
+ u8 *cmd;
+ u16 size;
+ void *ack;
+ u16 ack_size;
+};
+
+struct api_cmd_wr {
+ u32 pf_id;
+ u8 dest;
+ u8 *cmd;
+ u16 size;
+};
+
+struct pf_dev_info {
+ u64 bar0_size;
+ u8 bus;
+ u8 slot;
+ u8 func;
+ u64 phy_addr;
+};
+
+/* maximum number of interrupt records that will be kept in the FFM */
+#define FFM_RECORD_NUM_MAX 64
+
+struct ffm_intr_tm_info {
+ u8 node_id;
+ /* error level of the interrupt source */
+ u8 err_level;
+ /* Classification by interrupt source properties */
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+
+ u8 sec; /* second */
+ u8 min; /* minute */
+ u8 hour; /* hour */
+ u8 mday; /* day */
+ u8 mon; /* month */
+ u16 year; /* year */
+};
+
+struct ffm_record_info {
+ u32 ffm_num;
+ struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX];
+};
+
+struct msg_2_up {
+ u8 pf_id; /* which pf sends messages to the up */
+ u8 mod;
+ u8 cmd;
+ void *buf_in;
+ u16 in_size;
+ void *buf_out;
+ u16 *out_size;
+};
+
+struct dbgtool_param {
+ union {
+ struct api_cmd_rd api_rd;
+ struct api_cmd_wr api_wr;
+ struct pf_dev_info *dev_info;
+ struct ffm_record_info *ffm_rd;
+ struct msg_2_up msg2up;
+ } param;
+ char chip_name[16];
+};
+
+#ifndef MAX_CARD_NUM
+#define MAX_CARD_NUM 64
+#endif
+#define DBGTOOL_PAGE_ORDER 10
+
+int dbgtool_knl_init(void *vhwdev, void *chip_node);
+void dbgtool_knl_deinit(void *vhwdev, void *chip_node);
+int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma);
+void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id,
+ void **g_func_handle_array);
+long dbgtool_knl_free_mem(int id);
+#endif
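
A hypothetical user-space counterpart of hinic_mem_mmap() above, for
scale: DBGTOOL_PAGE_ORDER of 10 means 2^10 pages per card, i.e. 4 MiB
with 4 KiB pages, which is exactly the upper bound the driver enforces
on the mapping size. The buffer must first have been allocated through
DBGTOOL_CMD_PF_DEV_INFO_GET:

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define DBGTOOL_BUF_SIZE (4096UL << 10) /* PAGE_SIZE << DBGTOOL_PAGE_ORDER */

    static void *map_dbgtool_buf(void)
    {
            void *buf;
            int fd = open("/dev/dbgtool_chr_dev", O_RDWR);

            if (fd < 0)
                    return NULL;

            /* offset 0 makes the driver fall back to the current card's
             * physical address (the "old versions of the tool" path)
             */
            buf = mmap(NULL, DBGTOOL_BUF_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
            close(fd);
            return buf == MAP_FAILED ? NULL : buf;
    }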
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
new file mode 100644
index 000000000000..eb1cb657cb52
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_lld.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dcb.h"
+
+#define DCB_HW_CFG_CHG 0
+#define DCB_HW_CFG_NO_CHG 1
+#define DCB_HW_CFG_ERR 2
+
+#define DCB_CFG_CHG_PG_TX 0x1
+#define DCB_CFG_CHG_PG_RX 0x2
+#define DCB_CFG_CHG_PFC 0x4
+#define DCB_CFG_CHG_UP_COS 0x8
+
+u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up)
+{
+ struct hinic_tc_cfg *tc_cfg = &dcb_cfg->tc_cfg[0];
+ u8 tc = dcb_cfg->pg_tcs;
+
+ if (!tc)
+ return 0;
+
+ for (tc--; tc; tc--) {
+ if (BIT(up) & tc_cfg[tc].path[dir].up_map)
+ break;
+ }
+
+ return tc;
+}
+
+#define UP_MAPPING(prio) ((u8)(1U << ((HINIC_DCB_UP_MAX - 1) - (prio))))
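+/* e.g. with HINIC_DCB_UP_MAX == 8, UP_MAPPING(0) == 0x80: priority 0
+ * occupies the most-significant bit and priority i maps to bit (7 - i)
+ */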
+
+void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev,
+ struct hinic_dcb_config *dcb_cfg)
+{
+ struct hinic_tc_cfg *tc;
+ int i;
+
+ memset(dcb_cfg->tc_cfg, 0, sizeof(dcb_cfg->tc_cfg));
+ tc = &dcb_cfg->tc_cfg[0];
+ /* all TCs map to PG0 */
+ for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+ tc = &dcb_cfg->tc_cfg[i];
+ tc->path[HINIC_DCB_CFG_TX].pg_id = 0;
+ tc->path[HINIC_DCB_CFG_TX].bw_pct = 100;
+ tc->path[HINIC_DCB_CFG_TX].up_map = UP_MAPPING(i);
+ tc->path[HINIC_DCB_CFG_RX].pg_id = 0;
+ tc->path[HINIC_DCB_CFG_RX].bw_pct = 100;
+ tc->path[HINIC_DCB_CFG_RX].up_map = UP_MAPPING(i);
+
+ tc->pfc_en = false;
+ }
+
+ for (; i < HINIC_DCB_UP_MAX; i++) {
+ tc->path[HINIC_DCB_CFG_TX].up_map |= UP_MAPPING(i);
+ tc->path[HINIC_DCB_CFG_RX].up_map |= UP_MAPPING(i);
+ }
+
+ memset(dcb_cfg->bw_pct, 0, sizeof(dcb_cfg->bw_pct));
+ /* use PG0 by default; PG0's bw is 100% */
+ dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][0] = 100;
+ dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][0] = 100;
+ dcb_cfg->pfc_state = false;
+}
+
+static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev,
+ u8 num_cos, u8 *cos_up)
+{
+ u8 up_valid_bitmap, up_cos[HINIC_DCB_UP_MAX] = {0};
+ u8 i;
+
+ up_valid_bitmap = 0;
+ for (i = 0; i < num_cos; i++) {
+ if (cos_up[i] >= HINIC_DCB_UP_MAX) {
+ hinic_info(nic_dev, drv, "Invalid up %d mapping to cos %d\n",
+ cos_up[i], i);
+ return -EFAULT;
+ }
+
+ if (i > 0 && cos_up[i] >= cos_up[i - 1]) {
+ hinic_info(nic_dev, drv,
+ "Invalid priority order, should be descending cos[%d]=%d,
cos[%d]=%d\n",
+ i, cos_up[i], i - 1, cos_up[i - 1]);
+ return -EINVAL;
+ }
+
+ up_valid_bitmap |= (u8)BIT(cos_up[i]);
+ if (i == (num_cos - 1))
+ up_cos[cos_up[i]] = nic_dev->default_cos_id;
+ else
+ up_cos[cos_up[i]] = i; /* reverse up and cos */
+ }
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (up_valid_bitmap & (u8)BIT(i))
+ continue;
+
+ up_cos[i] = nic_dev->default_cos_id;
+ }
+
+ nic_dev->up_valid_bitmap = up_valid_bitmap;
+ memcpy(nic_dev->up_cos, up_cos, sizeof(up_cos));
+
+ return hinic_sq_cos_mapping(nic_dev->netdev);
+}
+
+static int hinic_init_up_cos_map(struct hinic_nic_dev *nic_dev, u8 num_cos)
+{
+ u8 default_map[HINIC_DCB_COS_MAX] = {0};
+ bool is_set = false;
+ u8 max_cos, cos_id, up;
+ int err;
+
+ max_cos = hinic_max_num_cos(nic_dev->hwdev);
+ if (!max_cos || ((max_cos - 1) < nic_dev->default_cos_id)) {
+ hinic_err(nic_dev, drv, "Max_cos is %d, default cos id %d\n",
+ max_cos, nic_dev->default_cos_id);
+ return -EFAULT;
+ }
+
+ err = hinic_get_chip_cos_up_map(nic_dev->pdev, &is_set, default_map);
+ if (err) {
+ hinic_err(nic_dev, drv, "Get chip cos_up map failed\n");
+ return -EFAULT;
+ }
+
+ if (!is_set) {
+ /* by default, map user priorities (max_cos-1)..0 to
+ * cos 0..(max_cos-1)
+ */
+ up = nic_dev->max_cos - 1;
+ for (cos_id = 0; cos_id < nic_dev->max_cos; cos_id++, up--)
+ default_map[cos_id] = up;
+ }
+
+ return hinic_set_up_cos_map(nic_dev, num_cos, default_map);
+}
+
+int hinic_dcb_init(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ u8 num_cos, support_cos = 0, default_cos = 0;
+ u8 i, cos_valid_bitmap;
+ int err;
+
+ if (HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ return 0;
+
+ cos_valid_bitmap = hinic_cos_valid_bitmap(nic_dev->hwdev);
+ if (!cos_valid_bitmap) {
+ hinic_err(nic_dev, drv, "None cos supported\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+ if (cos_valid_bitmap & BIT(i)) {
+ support_cos++;
+ default_cos = i; /* max cos id is the default cos */
+ }
+ }
+
+ hinic_info(nic_dev, drv, "Support num cos %d, default cos %d\n",
+ support_cos, default_cos);
+
+ num_cos = (u8)(1U << ilog2(support_cos));
+ if (num_cos != support_cos)
+ hinic_info(nic_dev, drv, "Adjust num_cos from %d to %d\n",
+ support_cos, num_cos);
+
+ nic_dev->dcbx_cap = 0;
+ nic_dev->max_cos = num_cos;
+ nic_dev->default_cos_id = default_cos;
+ dcb_cfg->pfc_tcs = nic_dev->max_cos;
+ dcb_cfg->pg_tcs = nic_dev->max_cos;
+ err = hinic_init_up_cos_map(nic_dev, num_cos);
+ if (err) {
+ hinic_info(nic_dev, drv, "Initialize up_cos mapping failed\n");
+ return -EFAULT;
+ }
+
+ hinic_dcb_config_init(nic_dev, dcb_cfg);
+
+ nic_dev->dcb_changes = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
+ DCB_CFG_CHG_PG_RX | DCB_CFG_CHG_UP_COS;
+ nic_dev->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+
+ memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->dcb_cfg,
+ sizeof(nic_dev->tmp_dcb_cfg));
+ memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg,
+ sizeof(nic_dev->save_dcb_cfg));
+
+ sema_init(&nic_dev->dcb_sem, 1);
+
+ return 0;
+}
+
+void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u8 prio, tc;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ for (prio = 0; prio < HINIC_DCB_UP_MAX; prio++) {
+ tc = nic_dev->up_cos[prio];
+ if (tc == nic_dev->default_cos_id)
+ tc = nic_dev->max_cos - 1;
+
+ netdev_set_prio_tc_map(netdev, prio, tc);
+ }
+}
+
+int hinic_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (!FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Current function don't support DCB\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (tc > nic_dev->dcb_cfg.pg_tcs) {
+ nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %d, max tc: %d\n",
+ tc, nic_dev->dcb_cfg.pg_tcs);
+ return -EINVAL;
+ }
+
+ /* num_tc must be a power of 2 (TODO: verify); check before
+ * closing the device so an invalid value can't leave it down
+ */
+ if (tc & (tc - 1)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Invalid num_tc: %d, must be power of 2\n",
+ tc);
+ return -EINVAL;
+ }
+
+ if (netif_running(netdev)) {
+ err = hinic_close(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to close device\n");
+ return -EFAULT;
+ }
+ }
+
+ if (tc) {
+ netdev_set_num_tc(netdev, tc);
+ hinic_set_prio_tc_map(nic_dev);
+
+ set_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+ } else {
+ netdev_reset_tc(netdev);
+
+ clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+ }
+
+ hinic_sq_cos_mapping(netdev);
+
+ if (netif_running(netdev)) {
+ err = hinic_open(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to open device\n");
+ return -EFAULT;
+ }
+ } else {
+ /* will implement hinic_update_num_qps() in next patch */
+ }
+
+ hinic_configure_dcb(netdev);
+
+ return 0;
+}
+
+static u8 hinic_dcbnl_get_state(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+}
+
+static u8 hinic_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 curr_state = !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+ int err = 0;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (state == curr_state)
+ return 0;
+
+ err = hinic_setup_tc(netdev, state ? nic_dev->dcb_cfg.pg_tcs : 0);
+ if (!err)
+ netif_info(nic_dev, drv, netdev, "%s DCB\n",
+ state ? "Enable" : "Disable");
+
+ return !!err;
+}
+
+static void hinic_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ err = hinic_get_default_mac(nic_dev->hwdev, perm_addr);
+ if (err)
+ nicif_err(nic_dev, drv, netdev, "Failed to get default mac\n");
+}
+
+static void hinic_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 prio, u8 pg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (prio != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].prio_type = prio;
+ if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].pg_id = pg_id;
+ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].bw_pct = bw_pct;
+ /* if all priorities map to the same tc,
+ * up_map is 0xFF, and that's a valid value
+ */
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].up_map = up_map;
+}
+
+static void hinic_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+ u8 bw_pct)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->tmp_dcb_cfg.bw_pct[0][bwg_id] = bw_pct;
+}
+
+static void hinic_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+ u8 prio, u8 pg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (prio != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].prio_type = prio;
+ if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].pg_id = pg_id;
+ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].bw_pct = bw_pct;
+
+ nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].up_map = up_map;
+}
+
+static void hinic_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+ u8 bw_pct)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->tmp_dcb_cfg.bw_pct[1][bwg_id] = bw_pct;
+}
+
+static void hinic_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 *prio, u8 *pg_id, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[0].prio_type;
+ *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[0].pg_id;
+ *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[0].bw_pct;
+ *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[0].up_map;
+}
+
+static void hinic_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+ u8 *bw_pct)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ *bw_pct = nic_dev->dcb_cfg.bw_pct[0][bwg_id];
+}
+
+static void hinic_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+ u8 *prio, u8 *pg_id, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[1].prio_type;
+ *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[1].pg_id;
+ *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[1].bw_pct;
+ *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[1].up_map;
+}
+
+static void hinic_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+ u8 *bw_pct)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ *bw_pct = nic_dev->dcb_cfg.bw_pct[1][bwg_id];
+}
+
+static void hinic_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en = !!setting;
+ if (nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en !=
+ nic_dev->dcb_cfg.tc_cfg[prio].pfc_en)
+ nic_dev->tmp_dcb_cfg.pfc_state = true;
+}
+
+static void hinic_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ *setting = nic_dev->dcb_cfg.tc_cfg[prio].pfc_en;
+}
+
+static u8 hinic_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = nic_dev->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+static u8 hinic_sync_tc_cfg(struct hinic_tc_cfg *tc_dst,
+ struct hinic_tc_cfg *tc_src, int dir)
+{
+ u8 tc_dir_change = (dir == HINIC_DCB_CFG_TX) ?
+ DCB_CFG_CHG_PG_TX : DCB_CFG_CHG_PG_RX;
+ u8 changes = 0;
+
+ if (tc_dst->path[dir].prio_type != tc_src->path[dir].prio_type) {
+ tc_dst->path[dir].prio_type = tc_src->path[dir].prio_type;
+ changes |= tc_dir_change;
+ }
+
+ if (tc_dst->path[dir].pg_id != tc_src->path[dir].pg_id) {
+ tc_dst->path[dir].pg_id = tc_src->path[dir].pg_id;
+ changes |= tc_dir_change;
+ }
+
+ if (tc_dst->path[dir].bw_pct != tc_src->path[dir].bw_pct) {
+ tc_dst->path[dir].bw_pct = tc_src->path[dir].bw_pct;
+ changes |= tc_dir_change;
+ }
+
+ if (tc_dst->path[dir].up_map != tc_src->path[dir].up_map) {
+ tc_dst->path[dir].up_map = tc_src->path[dir].up_map;
+ changes |= (tc_dir_change | DCB_CFG_CHG_PFC);
+ }
+
+ return changes;
+}
+
+static u8 hinic_sync_dcb_cfg(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct hinic_dcb_config *tmp_dcb_cfg = &nic_dev->tmp_dcb_cfg;
+ struct hinic_tc_cfg *tc_dst, *tc_src;
+ u8 changes = 0;
+ int i;
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ tc_src = &tmp_dcb_cfg->tc_cfg[i];
+ tc_dst = &dcb_cfg->tc_cfg[i];
+
+ changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_TX);
+ changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_RX);
+ }
+
+ for (i = 0; i < HINIC_DCB_PG_MAX; i++) {
+ if (dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] !=
+ tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]) {
+ dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] =
+ tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+ changes |= DCB_CFG_CHG_PG_TX;
+ }
+
+ if (dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] !=
+ tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]) {
+ dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] =
+ tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i];
+ changes |= DCB_CFG_CHG_PG_RX;
+ }
+ }
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (dcb_cfg->tc_cfg[i].pfc_en !=
+ tmp_dcb_cfg->tc_cfg[i].pfc_en) {
+ dcb_cfg->tc_cfg[i].pfc_en =
+ tmp_dcb_cfg->tc_cfg[i].pfc_en;
+ changes |= DCB_CFG_CHG_PFC;
+ }
+ }
+
+ if (dcb_cfg->pfc_state != tmp_dcb_cfg->pfc_state) {
+ dcb_cfg->pfc_state = tmp_dcb_cfg->pfc_state;
+ changes |= DCB_CFG_CHG_PFC;
+ }
+
+ return changes;
+}
+
+static void hinic_dcb_get_pfc_map(struct hinic_nic_dev *nic_dev,
+ struct hinic_dcb_config *dcb_cfg, u8 *pfc_map)
+{
+ int i, up;
+ u8 pfc_en = 0, outof_range_pfc = 0;
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ up = (HINIC_DCB_UP_MAX - 1) - i;
+ if (dcb_cfg->tc_cfg[up].pfc_en)
+ pfc_en |= (u8)BIT(up);
+ }
+
+ *pfc_map = pfc_en & nic_dev->up_valid_bitmap;
+ outof_range_pfc = pfc_en & (~nic_dev->up_valid_bitmap);
+
+ if (outof_range_pfc)
+ hinic_info(nic_dev, drv,
+ "PFC setting out of range, 0x%x will be ignored\n",
+ outof_range_pfc);
+}
+
+static bool is_cos_in_use(u8 cos, u8 up_valid_bitmap, u8 *up_cos)
+{
+ int i;
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (!(up_valid_bitmap & BIT(i)))
+ continue;
+
+ if (cos == up_cos[i])
+ return true;
+ }
+
+ return false;
+}
+
+static void hinic_dcb_adjust_up_bw(struct hinic_nic_dev *nic_dev, u8 *up_pgid,
+ u8 *up_bw)
+{
+ u8 tmp_cos, pg_id;
+ u16 bw_all;
+ u8 bw_remain, cos_cnt;
+
+ for (pg_id = 0; pg_id < HINIC_DCB_PG_MAX; pg_id++) {
+ bw_all = 0;
+ cos_cnt = 0;
+ /* Find all up mapping to the same pg */
+ for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
+ if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
+ nic_dev->up_cos))
+ continue;
+
+ if (up_pgid[tmp_cos] == pg_id) {
+ bw_all += up_bw[tmp_cos];
+ cos_cnt++;
+ }
+ }
+
+ if (bw_all <= 100 || !cos_cnt)
+ continue;
+
+ /* Calculate each up's percentage of its bandwidth group;
+ * the percentages of priorities in the same priority group
+ * must sum to 100
+ */
+ bw_remain = 100 % cos_cnt;
+ for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
+ if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
+ nic_dev->up_cos))
+ continue;
+
+ if (up_pgid[tmp_cos] == pg_id) {
+ up_bw[tmp_cos] =
+ (u8)(100 * up_bw[tmp_cos] / bw_all +
+ (u8)!!bw_remain);
+ if (bw_remain)
+ bw_remain--;
+ }
+ }
+ }
+}
+
+static void hinic_dcb_dump_configuration(struct hinic_nic_dev *nic_dev,
+ u8 *up_tc, u8 *up_pgid, u8 *up_bw,
+ u8 *pg_bw, u8 *up_strict)
+{
+ u8 i;
+ u8 cos;
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (!(nic_dev->up_valid_bitmap & BIT(i)))
+ continue;
+
+ cos = nic_dev->up_cos[i];
+ hinic_info(nic_dev, drv,
+ "up: %d, cos: %d, tc: %d, pgid: %d, bw: %d, tsa: %d\n",
+ i, cos, up_tc[cos], up_pgid[cos], up_bw[cos],
+ up_strict[cos]);
+ }
+
+ for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+ hinic_info(nic_dev, drv, "pgid: %d, bw: %d\n", i, pg_bw[i]);
+}
+
+/* Ucode thread timeout is 210ms, so the wait must be longer than 210ms */
+#define HINIC_WAIT_PORT_IO_STOP 250
+
+static int hinic_stop_port_traffic_flow(struct hinic_nic_dev *nic_dev)
+{
+ int err = 0;
+
+ down(&nic_dev->dcb_sem);
+
+ if (nic_dev->disable_port_cnt++ != 0)
+ goto out;
+
+ err = hinic_force_port_disable(nic_dev);
+ if (err) {
+ hinic_err(nic_dev, drv, "Failed to disable port\n");
+ goto set_port_err;
+ }
+
+ err = hinic_set_port_funcs_state(nic_dev->hwdev, false);
+ if (err) {
+ hinic_err(nic_dev, drv,
+ "Failed to disable all functions in port\n");
+ goto set_port_funcs_err;
+ }
+
+ hinic_info(nic_dev, drv, "Stop port traffic flow\n");
+
+ goto out;
+
+set_port_funcs_err:
+ hinic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev));
+
+set_port_err:
+out:
+ if (err)
+ nic_dev->disable_port_cnt--;
+
+ up(&nic_dev->dcb_sem);
+
+ return err;
+}
+
+static int hinic_start_port_traffic_flow(struct hinic_nic_dev *nic_dev)
+{
+ int err;
+
+ down(&nic_dev->dcb_sem);
+
+ nic_dev->disable_port_cnt--;
+ if (nic_dev->disable_port_cnt > 0) {
+ up(&nic_dev->dcb_sem);
+ return 0;
+ }
+
+ nic_dev->disable_port_cnt = 0;
+ up(&nic_dev->dcb_sem);
+
+ err = hinic_force_set_port_state(nic_dev,
+ !!netif_running(nic_dev->netdev));
+ if (err)
+ hinic_err(nic_dev, drv, "Failed to disable port\n");
+
+ err = hinic_set_port_funcs_state(nic_dev->hwdev, true);
+ if (err)
+ hinic_err(nic_dev, drv,
+ "Failed to disable all functions in port\n");
+
+ hinic_info(nic_dev, drv, "Start port traffic flow\n");
+
+ return err;
+}
+
+static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev)
+{
+ u8 cos, cos_valid_bitmap, cos_up_map[HINIC_DCB_COS_MAX] = {0};
+ u8 i;
+ int err;
+
+ cos_valid_bitmap = 0;
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (!(nic_dev->up_valid_bitmap & BIT(i)))
+ continue;
+
+ cos = nic_dev->up_cos[i];
+ cos_up_map[cos] = i;
+ cos_valid_bitmap |= (u8)BIT(cos);
+ }
+
+ err = hinic_dcb_set_cos_up_map(nic_dev->hwdev, cos_valid_bitmap,
+ cos_up_map);
+ if (err) {
+ hinic_info(nic_dev, drv, "Set cos_up map failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct hinic_tc_attr *tc_attr;
+ u8 up_tc[HINIC_DCB_UP_MAX] = {0};
+ u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
+ u8 up_bw[HINIC_DCB_UP_MAX] = {0};
+ u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
+ u8 up_strict[HINIC_DCB_UP_MAX] = {0};
+ u8 i, tc, cos;
+ int err;
+
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+ if (!(nic_dev->up_valid_bitmap & BIT(i)))
+ continue;
+
+ cos = nic_dev->up_cos[i];
+ tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i);
+ tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX];
+ up_tc[cos] = tc;
+ up_pgid[cos] = tc_attr->pg_id;
+ up_bw[cos] = tc_attr->bw_pct;
+ up_strict[cos] = tc_attr->prio_type ?
+ HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+ }
+
+ hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw);
+
+ for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+ pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid,
+ up_bw, pg_bw, up_strict);
+
+ err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid,
+ up_bw, up_strict);
+ if (err) {
+ hinic_err(nic_dev, drv, "Failed to set ets\n");
+ return err;
+ }
+
+ hinic_info(nic_dev, drv, "Set ets to hw done\n");
+
+ return 0;
+}
+
+u8 hinic_dcbnl_set_all(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ u8 state = DCB_HW_CFG_CHG;
+ int err;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return DCB_HW_CFG_ERR;
+
+ nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
+ if (!nic_dev->dcb_changes)
+ return DCB_HW_CFG_NO_CHG;
+
+ err = hinic_stop_port_traffic_flow(nic_dev);
+ if (err)
+ return DCB_HW_CFG_ERR;
+ /* wait for all traffic flows to stop */
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) {
+ err = __set_hw_cos_up_map(nic_dev);
+ if (err) {
+ hinic_info(nic_dev, drv,
+ "Set cos_up map to hardware failed\n");
+ state = DCB_HW_CFG_ERR;
+ goto out;
+ }
+
+ nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS);
+ }
+
+ if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) {
+ err = __set_hw_ets(nic_dev);
+ if (err) {
+ state = DCB_HW_CFG_ERR;
+ goto out;
+ }
+
+ nic_dev->dcb_changes &=
+ (~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX));
+ }
+
+ if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) {
+ u8 pfc_map = 0;
+
+ hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map);
+ err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state,
+ pfc_map);
+ if (err) {
+ hinic_info(nic_dev, drv, "Failed to %s PFC\n",
+ dcb_cfg->pfc_state ? "enable" : "disable");
+ state = DCB_HW_CFG_ERR;
+ goto out;
+ }
+
+ if (dcb_cfg->pfc_state)
+ hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n",
+ pfc_map);
+ else
+ hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n");
+
+ nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC);
+ }
+
+out:
+ hinic_start_port_traffic_flow(nic_dev);
+
+ return state;
+}
+
+#ifdef NUMTCS_RETURNS_U8
+static u8 hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+#else
+static int hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+#endif
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+
+ if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcb_cfg->pg_tcs;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcb_cfg->pfc_tcs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef NUMTCS_RETURNS_U8
+static u8 hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+#else
+static int hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+#endif
+{
+ return -EINVAL;
+}
+
+static u8 hinic_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return (u8)nic_dev->dcb_cfg.pfc_state;
+}
+
+static void hinic_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->tmp_dcb_cfg.pfc_state = !!state;
+}
+
+static u8 hinic_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return nic_dev->dcbx_cap;
+}
+
+static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if ((mode & DCB_CAP_DCBX_VER_IEEE) ||
+ ((mode & DCB_CAP_DCBX_LLD_MANAGED) &&
+ (!(mode & DCB_CAP_DCBX_HOST))))
+ return 1;
+
+ if (nic_dev->dcbx_cap == mode)
+ return 0;
+
+ nic_dev->dcbx_cap = mode;
+
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ u8 mask = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
+ DCB_CFG_CHG_PG_RX;
+ nic_dev->dcb_changes |= mask;
+ hinic_dcbnl_set_all(netdev);
+ } else {
+ err = hinic_setup_tc(netdev, 0);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to setup tc\n");
+ return 1;
+ }
+ }
+
+ nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode);
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops hinic_dcbnl_ops = {
+ .getstate = hinic_dcbnl_get_state,
+ .setstate = hinic_dcbnl_set_state,
+ .getpermhwaddr = hinic_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = hinic_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = hinic_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = hinic_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = hinic_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = hinic_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = hinic_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = hinic_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = hinic_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = hinic_dcbnl_set_pfc_cfg,
+ .getpfccfg = hinic_dcbnl_get_pfc_cfg,
+ .setall = hinic_dcbnl_set_all,
+ .getcap = hinic_dcbnl_getcap,
+ .getnumtcs = hinic_dcbnl_getnumtcs,
+ .setnumtcs = hinic_dcbnl_setnumtcs,
+ .getpfcstate = hinic_dcbnl_getpfcstate,
+ .setpfcstate = hinic_dcbnl_setpfcstate,
+ .getdcbx = hinic_dcbnl_getdcbx,
+ .setdcbx = hinic_dcbnl_setdcbx,
+};
+
+int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u8 state;
+
+ hinic_dcb_config_init(nic_dev, &nic_dev->tmp_dcb_cfg);
+ state = hinic_dcbnl_set_all(netdev);
+ if (state == DCB_HW_CFG_ERR)
+ return -EFAULT;
+
+ if (state == DCB_HW_CFG_CHG)
+ hinic_info(nic_dev, drv,
+ "Reset hardware DCB configuration done\n");
+
+ return 0;
+}
+
+void hinic_configure_dcb(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->save_dcb_cfg,
+ sizeof(nic_dev->tmp_dcb_cfg));
+ hinic_dcbnl_set_all(netdev);
+ } else {
+ memcpy(&nic_dev->save_dcb_cfg, &nic_dev->tmp_dcb_cfg,
+ sizeof(nic_dev->save_dcb_cfg));
+ err = hinic_dcb_reset_hw_config(nic_dev);
+ if (err)
+ nicif_warn(nic_dev, drv, netdev,
+ "Failed to reset hw dcb configuration\n");
+ }
+}
+
+static bool __is_cos_up_map_change(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ int cos, up;
+
+ for (cos = 0; cos < nic_dev->max_cos; cos++) {
+ up = cos_up[cos];
+ if (BIT(up) != (nic_dev->up_valid_bitmap & BIT(up)))
+ return true;
+ }
+
+ return false;
+}
+
+int __set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ struct net_device *netdev;
+ u8 state;
+ int err = 0;
+
+ if (!nic_dev || !cos_up)
+ return -EINVAL;
+
+ netdev = nic_dev->netdev;
+
+ if (test_and_set_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Cos_up map setting in inprocess, please try again later\n");
+ return -EFAULT;
+ }
+
+ nicif_info(nic_dev, drv, netdev, "Set cos2up:%d%d%d%d%d%d%d%d\n",
+ cos_up[0], cos_up[1], cos_up[2], cos_up[3],
+ cos_up[4], cos_up[5], cos_up[6], cos_up[7]);
+
+ if (!__is_cos_up_map_change(nic_dev, cos_up)) {
+ nicif_info(nic_dev, drv, netdev,
+ "Same mapping, no need to change anything\n");
+ err = 0;
+ goto out;
+ }
+
+ err = hinic_set_up_cos_map(nic_dev, nic_dev->max_cos, cos_up);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ nic_dev->dcb_changes = DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX |
+ DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ /* Change map in kernel */
+ hinic_set_prio_tc_map(nic_dev);
+
+ state = hinic_dcbnl_set_all(netdev);
+ if (state == DCB_HW_CFG_ERR) {
+ nicif_err(nic_dev, drv, netdev,
+ "Reconfig dcb to hw failed\n");
+ err = -EFAULT;
+ }
+ }
+
+out:
+ clear_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags);
+
+ return err;
+}
+
+int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos)
+{
+ if (!nic_dev || !num_cos)
+ return -EINVAL;
+
+ *num_cos = nic_dev->max_cos;
+
+ return 0;
+}
+
+int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *num_cos,
+ u8 *cos_up)
+{
+ u8 up, cos;
+
+ if (!nic_dev || !cos_up)
+ return -EINVAL;
+
+ for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
+ for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+ if (!(nic_dev->up_valid_bitmap & BIT(up)))
+ continue;
+
+ if (nic_dev->up_cos[up] == cos ||
+ nic_dev->up_cos[up] == nic_dev->default_cos_id)
+ cos_up[cos] = up;
+ }
+ }
+
+ *num_cos = nic_dev->max_cos;
+
+ return 0;
+}
+
+static int __stop_port_flow(void *uld_array[], u32 num_dev)
+{
+ struct hinic_nic_dev *tmp_dev;
+ u32 i, idx;
+ int err;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ err = hinic_stop_port_traffic_flow(tmp_dev);
+ if (err) {
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Stop port traffic flow failed\n");
+ goto stop_port_err;
+ }
+ }
+
+	/* wait for all traffic flows to stop */
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ return 0;
+
+stop_port_err:
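+	/* roll back: restart traffic on the ports already stopped */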
+ for (i = 0; i < idx; i++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+ hinic_start_port_traffic_flow(tmp_dev);
+ }
+
+ return err;
+}
+
+static void __start_port_flow(void *uld_array[], u32 num_dev)
+{
+ struct hinic_nic_dev *tmp_dev;
+ u32 idx;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ hinic_start_port_traffic_flow(tmp_dev);
+ }
+}
+
+/* for the hinicadm tool, all ports of the chip need to be changed */
+int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ void *uld_array[HINIC_MAX_PF_NUM];
+ struct hinic_nic_dev *tmp_dev;
+ u8 num_cos, old_cos_up[HINIC_DCB_COS_MAX] = {0};
+ u32 i, idx, num_dev = 0;
+ int err;
+
+	/* Save the old map in case the set fails */
+ err = hinic_get_cos_up_map(nic_dev, &num_cos, old_cos_up);
+ if (err || !num_cos) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Get old cos_up map failed\n");
+ return -EFAULT;
+ }
+
+ if (!memcmp(cos_up, old_cos_up, sizeof(u8) * num_cos)) {
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "Same cos2up map, don't need to change anything\n");
+ return 0;
+ }
+
+ /* Get all pf of this chip */
+ err = hinic_get_pf_uld_array(nic_dev->pdev, &num_dev, uld_array);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Get all pf private handle failed\n");
+ return -EFAULT;
+ }
+
+ err = __stop_port_flow(uld_array, num_dev);
+ if (err)
+ return -EFAULT;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ err = __set_cos_up_map(tmp_dev, cos_up);
+ if (err) {
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Set cos_up map to hw failed\n");
+ goto set_err;
+ }
+ }
+
+ __start_port_flow(uld_array, num_dev);
+
+ hinic_set_chip_cos_up_map(nic_dev->pdev, cos_up);
+
+ return 0;
+
+set_err:
+ /* undo all settings */
+ for (i = 0; i < idx; i++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+ err = __set_cos_up_map(tmp_dev, old_cos_up);
+ if (err)
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Undo cos_up map to hw failed\n");
+ }
+
+ __start_port_flow(uld_array, num_dev);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
new file mode 100644
index 000000000000..62b995759237
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_DCB_H_
+#define HINIC_DCB_H_
+
+#define HINIC_DCB_CFG_TX 0
+#define HINIC_DCB_CFG_RX 1
+
+enum HINIC_DCB_FLAGS {
+ HINIC_DCB_UP_COS_SETTING,
+ HINIC_DCB_TRAFFIC_STOPPED,
+};
+
+extern const struct dcbnl_rtnl_ops hinic_dcbnl_ops;
+
+u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up);
+
+int hinic_dcb_init(struct hinic_nic_dev *nic_dev);
+
+int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev);
+
+int hinic_setup_tc(struct net_device *netdev, u8 tc);
+
+void hinic_configure_dcb(struct net_device *netdev);
+
+int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up);
+
+int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos);
+
+int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev,
+ u8 *num_cos, u8 *cos_up);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
index 8f64c6a6ebcd..29c27a1d77b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_lld.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
@@ -1880,7 +1880,7 @@ static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter)
 /* arm does not support calling ioremap_wc() */
pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HINIC_DB_DWQE_SIZE,
- __pgprot(PROT_DEVICE_NGNRNE));
+ __pgprot(PROT_DEVICE_nGnRnE));
if (!pci_adapter->dwqe_mapping) {
 sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
goto mapping_dwqe_err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2352046971a4..5dd611e1f057 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1,5 +1,5 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
@@ -13,1086 +13,3069 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/device.h>
-#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
-#include <linux/slab.h>
#include <linux/if_vlan.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <net/ip.h>
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
+#include <linux/ethtool.h>
+#include <linux/dcbnl.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/debugfs.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
-#include "hinic_dev.h"
-
-MODULE_AUTHOR("Huawei Technologies CO., Ltd");
-MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
-MODULE_LICENSE("GPL");
-
-static unsigned int tx_weight = 64;
-module_param(tx_weight, uint, 0644);
-MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)");
-
-static unsigned int rx_weight = 64;
-module_param(rx_weight, uint, 0644);
-MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
-
-#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
-#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200
-#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201
+#include "hinic_qp.h"
+#include "hinic_dcb.h"
+#include "hinic_lld.h"
+#include "hinic_sriov.h"
+#include "hinic_pci_id_tbl.h"
+
+static u16 num_qps;
+module_param(num_qps, ushort, 0444);
+MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)");
+
+#define DEFAULT_POLL_WEIGHT 64
+static unsigned int poll_weight = DEFAULT_POLL_WEIGHT;
+module_param(poll_weight, uint, 0444);
+MODULE_PARM_DESC(poll_weight, "Number of packets for NAPI budget (default=64)");
+
+#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32
+#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static unsigned char qp_pending_limit = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+module_param(qp_pending_limit, byte, 0444);
+MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)");
+
+static unsigned char qp_coalesc_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+module_param(qp_coalesc_timer_cfg, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)");
+
+/* For arm64 servers, the best known configuration of lro max wqe
+ * number is 4 (8K); for x86_64 servers, it is 8 (16K). These values
+ * can also be configured via hinicadm.
+ */
+static unsigned char set_max_wqe_num;
+module_param(set_max_wqe_num, byte, 0444);
+MODULE_PARM_DESC(set_max_wqe_num, "Set lro max wqe number, valid range is 1 - 32, default is 4(arm) / 8(x86)");
-#define HINIC_WQ_NAME "hinic_dev"
+#define DEFAULT_RX_BUFF_LEN 2
+u16 rx_buff = DEFAULT_RX_BUFF_LEN;
+module_param(rx_buff, ushort, 0444);
+MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n in the range 2 - 16, default is 2KB");
-#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+static u32 set_lro_timer;
+module_param(set_lro_timer, uint, 0444);
+MODULE_PARM_DESC(set_lro_timer, "Set lro timer in microseconds, valid range is 1 - 1024, default is 16");
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
+static unsigned char set_link_status_follow = HINIC_LINK_FOLLOW_STATUS_MAX;
+module_param(set_link_status_follow, byte, 0444);
+MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status. 0 - default, 1 - follow, 2 - separate, other - unset. (default unset)");
-#define work_to_rx_mode_work(work) \
- container_of(work, struct hinic_rx_mode_work, work)
-#define rx_mode_work_to_nic_dev(rx_mode_work) \
- container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+static unsigned int lro_replenish_thld = 256;
+module_param(lro_replenish_thld, uint, 0444);
+MODULE_PARM_DESC(lro_replenish_thld, "Number of wqes for lro replenish buffer (default=256)");
-static int change_mac_addr(struct net_device *netdev, const u8 *addr);
-static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
- enum hinic_speed speed)
-{
- switch (speed) {
- case HINIC_SPEED_10MB_LINK:
- link_ksettings->base.speed = SPEED_10;
- break;
+static bool l2nic_interrupt_switch = true;
+module_param(l2nic_interrupt_switch, bool, 0644);
+MODULE_PARM_DESC(l2nic_interrupt_switch, "Control whether to execute the l2nic io interrupt switch, default is true");
- case HINIC_SPEED_100MB_LINK:
- link_ksettings->base.speed = SPEED_100;
- break;
+static unsigned char lro_en_status = HINIC_LRO_STATUS_UNSET;
+module_param(lro_en_status, byte, 0444);
+MODULE_PARM_DESC(lro_en_status, "lro enable status. 0 - disable, 1 - enable, other - unset. (default unset)");
- case HINIC_SPEED_1000MB_LINK:
- link_ksettings->base.speed = SPEED_1000;
- break;
- case HINIC_SPEED_10GB_LINK:
- link_ksettings->base.speed = SPEED_10000;
- break;
+static unsigned int enable_bp;/*lint !e728*/
- case HINIC_SPEED_25GB_LINK:
- link_ksettings->base.speed = SPEED_25000;
- break;
+static unsigned int bp_lower_thd = HINIC_RX_BP_LOWER_THD;
+static unsigned int bp_upper_thd = HINIC_RX_BP_UPPER_THD;
- case HINIC_SPEED_40GB_LINK:
- link_ksettings->base.speed = SPEED_40000;
- break;
+#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq"
- case HINIC_SPEED_100GB_LINK:
- link_ksettings->base.speed = SPEED_100000;
- break;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK)
- default:
- link_ksettings->base.speed = SPEED_UNKNOWN;
- break;
- }
-}
+#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
-static int hinic_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings
- *link_ksettings)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- enum hinic_port_link_state link_state;
- struct hinic_port_cap port_cap;
- int err;
+#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap))
- ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
+#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8)
- link_ksettings->base.speed = SPEED_UNKNOWN;
- link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.duplex = DUPLEX_UNKNOWN;
+#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \
+ VLAN_BITMAP_BITS_SIZE(nic_dev))
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to get port capabilities\n");
- return err;
- }
+#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \
+ VLAN_BITMAP_BYTE_SIZE(nic_dev))
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to get port link state\n");
- return err;
- }
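+/* the vlan bitmap is an array of unsigned longs: VID_LINE selects the
+ * word that holds a vid and VID_COL the bit within that word
+ */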
+#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
+#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
- if (link_state != HINIC_LINK_STATE_UP) {
- netif_info(nic_dev, drv, netdev, "No link\n");
- return err;
- }
+enum hinic_rx_mod {
+ HINIC_RX_MODE_UC = 1 << 0,
+ HINIC_RX_MODE_MC = 1 << 1,
+ HINIC_RX_MODE_BC = 1 << 2,
+ HINIC_RX_MODE_MC_ALL = 1 << 3,
+ HINIC_RX_MODE_PROMISC = 1 << 4,
+};
- set_link_speed(link_ksettings, port_cap.speed);
+enum hinic_rx_buff_len {
+ RX_BUFF_VALID_2KB = 2,
+ RX_BUFF_VALID_4KB = 4,
+ RX_BUFF_VALID_8KB = 8,
+ RX_BUFF_VALID_16KB = 16,
+};
- if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising, Autoneg);
+#define HINIC_AVG_PKT_SMALL 256U
+#define HINIC_MODERATONE_DELAY HZ
+#define CONVERT_UNIT 1024
- if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
- link_ksettings->base.autoneg = AUTONEG_ENABLE;
+#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
+int hinic_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr);
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
- return 0;
-}
+static int hinic_netdev_notifiers_ref_cnt;
+static struct notifier_block hinic_netdev_notifier = {
+ .notifier_call = hinic_netdev_event,
+};
-static void hinic_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
+static void hinic_register_notifier(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
-
- strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
-}
+ int err;
-static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
+ hinic_netdev_notifiers_ref_cnt++;
+ if (hinic_netdev_notifiers_ref_cnt == 1) {
+ err = register_netdevice_notifier(&hinic_netdev_notifier);
+ if (err) {
+ hinic_info(nic_dev, drv, "Register netdevice notifier failed, err: %d\n",
+ err);
+ hinic_netdev_notifiers_ref_cnt--;
+ }
+ }
}
-static void hinic_get_channels(struct net_device *netdev,
- struct ethtool_channels *channels)
+static void hinic_unregister_notifier(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ if (hinic_netdev_notifiers_ref_cnt == 1)
+ unregister_netdevice_notifier(&hinic_netdev_notifier);
- channels->max_rx = hwdev->nic_cap.max_qps;
- channels->max_tx = hwdev->nic_cap.max_qps;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = hinic_hwdev_num_qps(hwdev);
- channels->tx_count = hinic_hwdev_num_qps(hwdev);
- channels->other_count = 0;
- channels->combined_count = 0;
+ if (hinic_netdev_notifiers_ref_cnt)
+ hinic_netdev_notifiers_ref_cnt--;
}
-static const struct ethtool_ops hinic_ethtool_ops = {
- .get_link_ksettings = hinic_get_link_ksettings,
- .get_drvinfo = hinic_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ringparam = hinic_get_ringparam,
- .get_channels = hinic_get_channels,
-};
+#define HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 2
+#define HINIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \
+ NETIF_F_ALL_TSO)
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+int hinic_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
- struct hinic_rxq_stats rx_stats;
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *real_dev, *ret;
+ struct hinic_nic_dev *nic_dev;
+ u16 vlan_depth;
+
+ if (!is_vlan_dev(ndev))
+ return NOTIFY_DONE;
+
+ dev_hold(ndev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ real_dev = vlan_dev_real_dev(ndev);
+ nic_dev = hinic_get_uld_dev_by_ifname(real_dev->name,
+ SERVICE_T_NIC);
+ if (!nic_dev)
+ goto out;
+
+ vlan_depth = 1;
+ ret = vlan_dev_priv(ndev)->real_dev;
+ while (is_vlan_dev(ret)) {
+ ret = vlan_dev_priv(ret)->real_dev;
+ vlan_depth++;
+ }
+
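+		/* offloads are supported up to HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT
+		 * stacked vlans; deeper vlan devices have them cleared
+		 */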
+ if (vlan_depth == HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+ ndev->vlan_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
+ } else if (vlan_depth > HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_hw_features(ndev,
+ get_netdev_hw_features(ndev) &
+ (~HINIC_VLAN_CLEAR_OFFLOAD));
+#else
+ ndev->hw_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
+#endif
+#endif
+ ndev->features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
+ }
- u64_stats_init(&rx_stats.syncp);
+ break;
- hinic_rxq_get_stats(rxq, &rx_stats);
+ default:
+ break;
+	}
- u64_stats_update_begin(&nic_rx_stats->syncp);
- nic_rx_stats->bytes += rx_stats.bytes;
- nic_rx_stats->pkts += rx_stats.pkts;
- u64_stats_update_end(&nic_rx_stats->syncp);
+out:
+ dev_put(ndev);
- hinic_rxq_clean_stats(rxq);
+ return NOTIFY_DONE;
}
+#endif
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
- struct hinic_txq_stats tx_stats;
+ struct net_device *netdev = nic_dev->netdev;
- u64_stats_init(&tx_stats.syncp);
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
+ test_bit(HINIC_LP_TEST, &nic_dev->flags))
+ return;
- hinic_txq_get_stats(txq, &tx_stats);
+ if (status) {
+ if (netif_carrier_ok(netdev))
+ return;
- u64_stats_update_begin(&nic_tx_stats->syncp);
- nic_tx_stats->bytes += tx_stats.bytes;
- nic_tx_stats->pkts += tx_stats.pkts;
- nic_tx_stats->tx_busy += tx_stats.tx_busy;
- nic_tx_stats->tx_wake += tx_stats.tx_wake;
- nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
- u64_stats_update_end(&nic_tx_stats->syncp);
+ nic_dev->link_status = status;
+ netif_carrier_on(netdev);
+ nicif_info(nic_dev, link, netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
- hinic_txq_clean_stats(txq);
+ nic_dev->link_status = status;
+ netif_carrier_off(netdev);
+ nicif_info(nic_dev, link, netdev, "Link is down\n");
+ }
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void hinic_heart_lost(struct hinic_nic_dev *nic_dev)
{
- int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-
- for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
-
- for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ nic_dev->heart_status = false;
}
-/**
- * create_txqs - Create the Logical Tx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int create_txqs(struct hinic_dev *nic_dev)
+static int hinic_setup_qps_resources(struct hinic_nic_dev *nic_dev)
{
- int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
-
- if (nic_dev->txqs)
- return -EINVAL;
-
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
- if (!nic_dev->txqs)
- return -ENOMEM;
+ int err;
- for (i = 0; i < num_txqs; i++) {
- struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
+ err = hinic_setup_all_tx_resources(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to create Tx queues\n");
+ return err;
+ }
- err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to init Txq\n");
- goto err_init_txq;
- }
+ err = hinic_setup_all_rx_resources(netdev, nic_dev->qps_irq_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to create Rx queues\n");
+ goto create_rxqs_err;
}
return 0;
-err_init_txq:
- for (j = 0; j < i; j++)
- hinic_clean_txq(&nic_dev->txqs[j]);
+create_rxqs_err:
+ hinic_free_all_tx_resources(netdev);
- devm_kfree(&netdev->dev, nic_dev->txqs);
return err;
}
-/**
- * free_txqs - Free the Logical Tx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- **/
-static void free_txqs(struct hinic_dev *nic_dev)
-{
- int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
- struct net_device *netdev = nic_dev->netdev;
-
- if (!nic_dev->txqs)
- return;
-
- for (i = 0; i < num_txqs; i++)
- hinic_clean_txq(&nic_dev->txqs[i]);
-
- devm_kfree(&netdev->dev, nic_dev->txqs);
- nic_dev->txqs = NULL;
-}
-
-/**
- * create_txqs - Create the Logical Rx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int create_rxqs(struct hinic_dev *nic_dev)
+static int hinic_configure(struct hinic_nic_dev *nic_dev)
{
- int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
-
- if (nic_dev->rxqs)
- return -EINVAL;
-
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
- if (!nic_dev->rxqs)
- return -ENOMEM;
-
- for (i = 0; i < num_rxqs; i++) {
- struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
+ int err;
- err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to init rxq\n");
- goto err_init_rxq;
- }
+ /* rx rss init */
+ err = hinic_rx_configure(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n");
+ return err;
}
return 0;
+}
-err_init_rxq:
- for (j = 0; j < i; j++)
- hinic_clean_rxq(&nic_dev->rxqs[j]);
-
- devm_kfree(&netdev->dev, nic_dev->rxqs);
- return err;
+static void hinic_remove_configure(struct hinic_nic_dev *nic_dev)
+{
+ hinic_rx_remove_configure(nic_dev->netdev);
}
-/**
- * free_txqs - Free the Logical Rx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- **/
-static void free_rxqs(struct hinic_dev *nic_dev)
+static void hinic_setup_dcb_qps(struct hinic_nic_dev *nic_dev, u16 max_qps)
{
- int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
+ u16 num_rss;
+ u8 num_tcs;
+ u8 i;
- if (!nic_dev->rxqs)
+ if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ||
+ !test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
return;
- for (i = 0; i < num_rxqs; i++)
- hinic_clean_rxq(&nic_dev->rxqs[i]);
-
- devm_kfree(&netdev->dev, nic_dev->rxqs);
- nic_dev->rxqs = NULL;
+ num_tcs = (u8)netdev_get_num_tc(netdev);
+	/* For now, changing num_tcs is not supported */
+ if (num_tcs != nic_dev->max_cos || max_qps < num_tcs) {
+ nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %d or num_qps: %d, disable
DCB\n",
+ num_tcs, max_qps);
+ netdev_reset_tc(netdev);
+ clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+		/* if rss can't be enabled or there aren't enough num_qps,
+		 * the default configuration needs to be synced to hw
+		 */
+ hinic_configure_dcb(netdev);
+ } else {
+		/* We bind each sq to a cos, not to a tc */
+ num_rss = (u16)(max_qps / nic_dev->max_cos);
+ num_rss = min_t(u16, num_rss, nic_dev->rss_limit);
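+		/* each cos gets a contiguous block of num_rss queues */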
+ for (i = 0; i < nic_dev->max_cos; i++)
+ netdev_set_tc_queue(netdev, i, num_rss,
+ (u16)(num_rss * i));
+
+ nic_dev->num_rss = num_rss;
+ nic_dev->num_qps = (u16)(num_tcs * num_rss);
+ }
}
-static int hinic_open(struct net_device *netdev)
+/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */
+static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- enum hinic_port_link_state link_state;
- int err, ret, num_qps;
+ struct net_device *netdev = nic_dev->netdev;
+ u32 irq_size;
+ u16 resp_irq_num, i;
+ int err;
- if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed - HW interface up\n");
- return err;
- }
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nic_dev->num_rss = nic_dev->rss_limit;
+ nic_dev->num_qps = nic_dev->rss_limit;
+ } else {
+ nic_dev->num_rss = 0;
+ nic_dev->num_qps = 1;
}
- err = create_txqs(nic_dev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to create Tx queues\n");
- goto err_create_txqs;
- }
+ hinic_setup_dcb_qps(nic_dev, nic_dev->max_qps);
- err = create_rxqs(nic_dev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to create Rx queues\n");
- goto err_create_rxqs;
+ irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->num_qps;
+ if (!irq_size) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n");
+ return -EINVAL;
}
-
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- netif_set_real_num_tx_queues(netdev, num_qps);
- netif_set_real_num_rx_queues(netdev, num_qps);
-
- err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set port state\n");
- goto err_port_state;
+ nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL);
+ if (!nic_dev->qps_irq_info) {
+ nicif_err(nic_dev, drv, netdev, "Failed to alloc msix entries\n");
+ return -ENOMEM;
}
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
+ err = hinic_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->num_qps,
+ nic_dev->qps_irq_info, &resp_irq_num);
if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- goto err_func_port_state;
+ nicif_err(nic_dev, drv, netdev, "Failed to alloc irqs\n");
+ kfree(nic_dev->qps_irq_info);
+ return err;
}
- /* Wait up to 3 sec between port enable to link state */
- msleep(3000);
+	/* the available irq number is less than the requested qps, adjust num_qps */
+ if (resp_irq_num < nic_dev->num_qps) {
+ nic_dev->num_qps = resp_irq_num;
+ nic_dev->num_rss = nic_dev->num_qps;
+ hinic_setup_dcb_qps(nic_dev, nic_dev->num_qps);
+ nicif_warn(nic_dev, drv, netdev,
+ "Can not get enough irqs, adjust num_qps to %d\n",
+ nic_dev->num_qps);
+		/* after adjusting num_qps, free the remaining irqs */
+ for (i = nic_dev->num_qps; i < resp_irq_num; i++)
+ hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+ nic_dev->qps_irq_info[i].irq_id);
+ }
- down(&nic_dev->mgmt_lock);
+ nicif_info(nic_dev, drv, netdev, "Finally num_qps: %d, num_rss: %d\n",
+ nic_dev->num_qps, nic_dev->num_rss);
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
- goto err_port_link;
- }
+ return 0;
+}
- if (link_state == HINIC_LINK_STATE_UP)
- nic_dev->flags |= HINIC_LINK_UP;
+static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
+{
+ u16 i;
- nic_dev->flags |= HINIC_INTF_UP;
+ for (i = 0; i < nic_dev->num_qps; i++)
+ hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+ nic_dev->qps_irq_info[i].irq_id);
- if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
- (HINIC_LINK_UP | HINIC_INTF_UP)) {
- netif_info(nic_dev, drv, netdev, "link + intf UP\n");
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- }
+ kfree(nic_dev->qps_irq_info);
+}
- up(&nic_dev->mgmt_lock);
+int hinic_poll(struct napi_struct *napi, int budget)
+{
+ int tx_pkts, rx_pkts;
+ struct hinic_irq *irq_cfg = container_of(napi, struct hinic_irq, napi);
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
- netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
- return 0;
+ rx_pkts = hinic_rx_poll(irq_cfg->rxq, budget);
-err_port_link:
- up(&nic_dev->mgmt_lock);
- ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
- if (ret)
- netif_warn(nic_dev, drv, netdev,
- "Failed to revert func port state\n");
+ tx_pkts = hinic_tx_poll(irq_cfg->txq, budget);
-err_func_port_state:
- ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- if (ret)
- netif_warn(nic_dev, drv, netdev,
- "Failed to revert port state\n");
+ if (tx_pkts >= budget || rx_pkts >= budget)
+ return budget;
-err_port_state:
- free_rxqs(nic_dev);
+ napi_complete(napi);
-err_create_rxqs:
- free_txqs(nic_dev);
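+	/* napi is done, re-arm the interrupt: the PF re-enables the
+	 * MSI-X state, a bare-metal VF re-enables the kernel irq
+	 */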
+ if (!test_and_set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag)) {
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_set_msix_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC_MSIX_ENABLE);
+ else if (!nic_dev->in_vm)
+ enable_irq(irq_cfg->irq_id);
+ }
-err_create_txqs:
- if (!(nic_dev->flags & HINIC_INTF_UP))
- hinic_hwdev_ifdown(nic_dev->hwdev);
- return err;
+ return max(tx_pkts, rx_pkts);
}
-static int hinic_close(struct net_device *netdev)
+static void qp_add_napi(struct hinic_irq *irq_cfg)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- unsigned int flags;
- int err;
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
- down(&nic_dev->mgmt_lock);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi,
+ hinic_poll, nic_dev->poll_weight);
+ napi_enable(&irq_cfg->napi);
+}
- flags = nic_dev->flags;
- nic_dev->flags &= ~HINIC_INTF_UP;
+static void qp_del_napi(struct hinic_irq *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_napi_del(&irq_cfg->napi);
+}
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic_irq *irq_cfg = (struct hinic_irq *)data;
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ if (l2nic_interrupt_switch) {
+		/* Disable the interrupt until napi is complete */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_set_msix_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ else if (!nic_dev->in_vm)
+ disable_irq_nosync(irq_cfg->irq_id);
+
+ clear_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
+ }
- update_nic_stats(nic_dev);
+	/* the last argument (1) is the resend timer */
+ hinic_misx_intr_clear_resend_bit(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx, 1);
- up(&nic_dev->mgmt_lock);
+ napi_schedule(&irq_cfg->napi);
+ return IRQ_HANDLED;
+}
+
+static int hinic_request_irq(struct hinic_irq *irq_cfg, u16 q_id)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+ struct nic_interrupt_info info = {0};
+ int err;
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limt =
+ nic_dev->intr_coalesce[q_id].pending_limt;
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, info);
if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
+ nicif_err(nic_dev, drv, irq_cfg->netdev,
+ "Failed to set RX interrupt coalescing attribute.\n");
+ qp_del_napi(irq_cfg);
return err;
}
- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
+ err = request_irq(irq_cfg->irq_id, &qp_irq, 0,
+ irq_cfg->irq_name, irq_cfg);
if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
+ nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n");
+ qp_del_napi(irq_cfg);
return err;
}
- free_rxqs(nic_dev);
- free_txqs(nic_dev);
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
- if (flags & HINIC_INTF_UP)
- hinic_hwdev_ifdown(nic_dev->hwdev);
-
- netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
return 0;
}
-static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+static int set_interrupt_moder(struct hinic_nic_dev *nic_dev, u16 q_id,
+ u8 coalesc_timer_cfg, u8 pending_limt)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_interrupt_info interrupt_info = {0};
int err;
- netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
-
- err = hinic_port_set_mtu(nic_dev, new_mtu);
- if (err)
- netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
- else
- netdev->mtu = new_mtu;
+ if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg &&
+ pending_limt == nic_dev->rxqs[q_id].last_pending_limt)
+ return 0;
+
+	/* netdev not running or qp not in use,
+	 * no need to set coalescing to hw
+	 */
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
+ q_id >= nic_dev->num_qps)
+ return 0;
+
+ interrupt_info.lli_set = 0;
+ interrupt_info.interrupt_coalesc_set = 1;
+ interrupt_info.coalesc_timer_cfg = coalesc_timer_cfg;
+ interrupt_info.pending_limt = pending_limt;
+ interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx;
+ interrupt_info.resend_timer_cfg =
+ nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed modifying moderation for Queue: %d\n", q_id);
+ } else {
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limt = pending_limt;
+ }
return err;
}
-/**
- * change_mac_addr - change the main mac address of network device
- * @netdev: network device
- * @addr: mac address to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac_addr(struct net_device *netdev, const u8 *addr)
+static void __calc_coal_para(struct hinic_nic_dev *nic_dev,
+ struct hinic_intr_coal_info *q_coal, u64 rate,
+ u8 *coalesc_timer_cfg, u8 *pending_limt)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
- int err;
+ if (rate < q_coal->pkt_rate_low) {
+ *coalesc_timer_cfg = q_coal->rx_usecs_low;
+ *pending_limt = q_coal->rx_pending_limt_low;
+ } else if (rate > q_coal->pkt_rate_high) {
+ *coalesc_timer_cfg = q_coal->rx_usecs_high;
+ *pending_limt = q_coal->rx_pending_limt_high;
+ } else {
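+		/* linearly interpolate the timer (and, in a VM, the
+		 * pending limit) between the low and high rate points
+		 */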
+ *coalesc_timer_cfg =
+ (u8)((rate - q_coal->pkt_rate_low) *
+ (q_coal->rx_usecs_high -
+ q_coal->rx_usecs_low) /
+ (q_coal->pkt_rate_high -
+ q_coal->pkt_rate_low) +
+ q_coal->rx_usecs_low);
+ if (nic_dev->in_vm)
+ *pending_limt = (u8)((rate - q_coal->pkt_rate_low) *
+ (q_coal->rx_pending_limt_high -
+ q_coal->rx_pending_limt_low) /
+ (q_coal->pkt_rate_high -
+ q_coal->pkt_rate_low) +
+ q_coal->rx_pending_limt_low);
+ else
+ *pending_limt = q_coal->rx_pending_limt_low;
+ }
+}
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
+static void hinic_auto_moderation_work(struct work_struct *work)
+{
+ struct hinic_intr_coal_info *q_coal;
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic_nic_dev *nic_dev = container_of(delay,
+ struct hinic_nic_dev,
+ moderation_task);
+ unsigned long period = (unsigned long)(jiffies -
+ nic_dev->last_moder_jiffies);
+
+ u64 rx_packets, rx_bytes, rx_pkt_diff, rate, avg_pkt_size;
+ u8 coalesc_timer_cfg, pending_limt;
+ u16 qid;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags))
+ return;
- netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x
%02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
+ HINIC_MODERATONE_DELAY);
- down(&nic_dev->mgmt_lock);
+ if (!nic_dev->adaptive_rx_coal || !period)
+ return;
- do {
- err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to delete mac\n");
- break;
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ rx_packets = nic_dev->rxqs[qid].rxq_stats.packets;
+ rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes;
+ q_coal = &nic_dev->intr_coalesce[qid];
+
+ rx_pkt_diff =
+ rx_packets - nic_dev->rxqs[qid].last_moder_packets;
+ avg_pkt_size = rx_pkt_diff ?
+ ((unsigned long)(rx_bytes -
+ nic_dev->rxqs[qid].last_moder_bytes)) /
+ rx_pkt_diff : 0;
+
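+		/* period is in jiffies, so rate is in packets per second */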
+ rate = rx_pkt_diff * HZ / period;
+
+ if ((rate > HINIC_RX_RATE_THRESH &&
+ avg_pkt_size > HINIC_AVG_PKT_SMALL) || nic_dev->in_vm) {
+ __calc_coal_para(nic_dev, q_coal, rate,
+ &coalesc_timer_cfg, &pending_limt);
+ } else {
+ coalesc_timer_cfg = HINIC_LOWEST_LATENCY;
+ pending_limt = q_coal->rx_pending_limt_low;
+ }
+
+ set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg,
+ pending_limt);
+
+ nic_dev->rxqs[qid].last_moder_packets = rx_packets;
+ nic_dev->rxqs[qid].last_moder_bytes = rx_bytes;
+ }
+
+ nic_dev->last_moder_jiffies = jiffies;
+}
+
+static void hinic_release_irq(struct hinic_irq *irq_cfg)
+{
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ synchronize_irq(irq_cfg->irq_id);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+ qp_del_napi(irq_cfg);
+}
+
+static int hinic_qps_irq_init(struct hinic_nic_dev *nic_dev)
+{
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct irq_info *qp_irq_info;
+ struct hinic_irq *irq_cfg;
+ u16 q_id, i;
+ u32 local_cpu;
+ int err;
+
+ nic_dev->irq_cfg = kcalloc(nic_dev->num_qps, sizeof(*nic_dev->irq_cfg),
+ GFP_KERNEL);
+ if (!nic_dev->irq_cfg) {
+ nic_err(&pdev->dev, "Failed to alloc irq cfg\n");
+ return -ENOMEM;
+ }
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ qp_irq_info = &nic_dev->qps_irq_info[q_id];
+ irq_cfg = &nic_dev->irq_cfg[q_id];
+
+ irq_cfg->irq_id = qp_irq_info->irq_id;
+ irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx;
+ irq_cfg->netdev = nic_dev->netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ if (nic_dev->force_affinity) {
+ irq_cfg->affinity_mask = nic_dev->affinity_mask;
+ } else {
+ local_cpu =
+ cpumask_local_spread(q_id,
+ dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
}
- err = hinic_port_add_mac(nic_dev, addr, vid);
+ snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%d", nic_dev->netdev->name, q_id);
+ err = hinic_request_irq(irq_cfg, q_id);
if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
- break;
+ nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n");
+ goto req_tx_irq_err;
}
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ hinic_set_msix_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC_MSIX_ENABLE);
+ set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
+ }
+
+ INIT_DELAYED_WORK(&nic_dev->moderation_task,
+ hinic_auto_moderation_work);
+
+ return 0;
+
+req_tx_irq_err:
+ for (i = 0; i < q_id; i++) {
+ hinic_set_msix_state(nic_dev->hwdev,
+ nic_dev->irq_cfg[i].msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ hinic_release_irq(&nic_dev->irq_cfg[i]);
+ }
+
+ kfree(nic_dev->irq_cfg);
- up(&nic_dev->mgmt_lock);
return err;
}
-static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
+static void hinic_qps_irq_deinit(struct hinic_nic_dev *nic_dev)
+{
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ hinic_set_msix_state(nic_dev->hwdev,
+ nic_dev->irq_cfg[q_id].msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ hinic_release_irq(&nic_dev->irq_cfg[q_id]);
+ }
+
+ kfree(nic_dev->irq_cfg);
+}
+
+int hinic_force_port_disable(struct hinic_nic_dev *nic_dev)
{
- unsigned char new_mac[ETH_ALEN];
- struct sockaddr *saddr = addr;
int err;
- memcpy(new_mac, saddr->sa_data, ETH_ALEN);
+ down(&nic_dev->port_state_sem);
- err = change_mac_addr(netdev, new_mac);
+ err = hinic_set_port_enable(nic_dev->hwdev, false);
if (!err)
- memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
+ nic_dev->force_port_disable = true;
+
+ up(&nic_dev->port_state_sem);
return err;
}
-/**
- * add_mac_addr - add mac address to network device
- * @netdev: network device
- * @addr: mac address to add
- *
- * Return 0 - Success, negative - Failure
- **/
-static int add_mac_addr(struct net_device *netdev, const u8 *addr)
+int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
- int err;
+ int err = 0;
- netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x
%02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ down(&nic_dev->port_state_sem);
- down(&nic_dev->mgmt_lock);
-
- do {
- err = hinic_port_add_mac(nic_dev, addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
- break;
- }
+ nic_dev->force_port_disable = false;
+ err = hinic_set_port_enable(nic_dev->hwdev, enable);
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ up(&nic_dev->port_state_sem);
- up(&nic_dev->mgmt_lock);
return err;
}
-/**
- * remove_mac_addr - remove mac address from network device
- * @netdev: network device
- * @addr: mac address to remove
- *
- * Return 0 - Success, negative - Failure
- **/
-static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
+int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
- netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x
%02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ down(&nic_dev->port_state_sem);
- down(&nic_dev->mgmt_lock);
+	/* Do nothing when force disabled:
+	 * the port was disabled by a forced port disable and
+	 * must not be enabled again while in force mode
+	 */
+ if (nic_dev->force_port_disable) {
+ up(&nic_dev->port_state_sem);
+ return 0;
+ }
- do {
- err = hinic_port_del_mac(nic_dev, addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to delete mac\n");
- break;
- }
+ err = hinic_set_port_enable(nic_dev->hwdev, enable);
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ up(&nic_dev->port_state_sem);
- up(&nic_dev->mgmt_lock);
return err;
}
-static int hinic_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static void hinic_print_link_message(struct hinic_nic_dev *nic_dev,
+ u8 link_status)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int ret, err;
+ if (nic_dev->link_status == link_status)
+ return;
- netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
+ nic_dev->link_status = link_status;
- down(&nic_dev->mgmt_lock);
+ nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n",
+ (link_status ? "up" : "down"));
+}
- err = hinic_port_add_vlan(nic_dev, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
- goto err_vlan_add;
+int hinic_open(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 link_status = 0;
+ int err;
+
+ if (test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n");
+ return 0;
}
- err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
+ err = hinic_setup_num_qps(nic_dev);
if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
- goto err_add_mac;
+ nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n");
+ return err;
}
- bitmap_set(nic_dev->vlan_bitmap, vid, 1);
+ err = hinic_create_qps(nic_dev->hwdev, nic_dev->num_qps,
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ nic_dev->qps_irq_info, HINIC_MAX_SQ_BUFDESCS);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to create queue pairs\n");
+ goto create_qps_err;
+ }
- up(&nic_dev->mgmt_lock);
- return 0;
+ err = hinic_setup_qps_resources(nic_dev);
+ if (err)
+ goto setup_qps_resources_err;
-err_add_mac:
- ret = hinic_port_del_vlan(nic_dev, vid);
- if (ret)
- netif_err(nic_dev, drv, netdev,
- "Failed to revert by removing vlan\n");
+ err = hinic_init_qp_ctxts(nic_dev->hwdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to init qp ctxts\n");
+ goto init_qp_ctxts_err;
+ }
-err_vlan_add:
- up(&nic_dev->mgmt_lock);
- return err;
-}
+ err = hinic_set_port_mtu(nic_dev->hwdev, netdev->mtu);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n");
+ goto mtu_err;
+ }
-static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err;
+ err = hinic_configure(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to configure txrx\n");
+ goto cfg_err;
+ }
- netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
+ err = hinic_qps_irq_init(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to qps irq init\n");
+ goto qps_irqs_init_err;
+ }
- down(&nic_dev->mgmt_lock);
+ err = hinic_set_vport_enable(nic_dev->hwdev, true);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n");
+ goto vport_enable_err;
+ }
- err = hinic_port_del_vlan(nic_dev, vid);
+ err = hinic_maybe_set_port_state(nic_dev, true);
if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
- goto err_del_vlan;
+ nicif_err(nic_dev, drv, netdev, "Failed to enable port\n");
+ goto port_enable_err;
}
- bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
+ set_bit(HINIC_INTF_UP, &nic_dev->flags);
- up(&nic_dev->mgmt_lock);
- return 0;
+ netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
+ netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
+ netif_tx_wake_all_queues(netdev);
-err_del_vlan:
- up(&nic_dev->mgmt_lock);
- return err;
-}
+ queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
+ HINIC_MODERATONE_DELAY);
-static void set_rx_mode(struct work_struct *work)
-{
- struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
- struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- struct netdev_hw_addr *ha;
+ err = hinic_get_link_state(nic_dev->hwdev, &link_status);
+ if (!err && link_status) {
+ hinic_update_pf_bw(nic_dev->hwdev);
+ netif_carrier_on(netdev);
+ }
- netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
+ hinic_print_link_message(nic_dev, link_status);
- hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
- __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
- __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+ nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n");
- netdev_for_each_mc_addr(ha, nic_dev->netdev)
- add_mac_addr(nic_dev->netdev, ha->addr);
-}
+ return 0;
-static void hinic_set_rx_mode(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rx_mode_work *rx_mode_work;
- u32 rx_mode;
+port_enable_err:
+ hinic_set_vport_enable(nic_dev->hwdev, false);
- rx_mode_work = &nic_dev->rx_mode_work;
+vport_enable_err:
+ hinic_flush_sq_res(nic_dev->hwdev);
+	/* 100ms after the vport is disabled, no packets will be sent to the host */
+ msleep(100);
+ hinic_qps_irq_deinit(nic_dev);
- rx_mode = HINIC_RX_MODE_UC |
- HINIC_RX_MODE_MC |
- HINIC_RX_MODE_BC;
+qps_irqs_init_err:
+ hinic_remove_configure(nic_dev);
- if (netdev->flags & IFF_PROMISC)
- rx_mode |= HINIC_RX_MODE_PROMISC;
- else if (netdev->flags & IFF_ALLMULTI)
- rx_mode |= HINIC_RX_MODE_MC_ALL;
+cfg_err:
+mtu_err:
+ hinic_free_qp_ctxts(nic_dev->hwdev);
- rx_mode_work->rx_mode = rx_mode;
+init_qp_ctxts_err:
+ hinic_free_all_rx_resources(netdev);
+ hinic_free_all_tx_resources(netdev);
- queue_work(nic_dev->workq, &rx_mode_work->work);
-}
+setup_qps_resources_err:
+ hinic_free_qps(nic_dev->hwdev);
-static void hinic_tx_timeout(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+create_qps_err:
+ hinic_destroy_num_qps(nic_dev);
- netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+ return err;
}
-static void hinic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+int hinic_close(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
+ if (!test_and_clear_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n");
+ return 0;
+ }
- down(&nic_dev->mgmt_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
- if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
+ cancel_delayed_work_sync(&nic_dev->moderation_task);
- up(&nic_dev->mgmt_lock);
+ if (hinic_get_chip_present_flag(nic_dev->hwdev)) {
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
+ hinic_maybe_set_port_state(nic_dev, false);
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
-}
+ hinic_set_vport_enable(nic_dev->hwdev, false);
-static const struct net_device_ops hinic_netdev_ops = {
- .ndo_open = hinic_open,
- .ndo_stop = hinic_close,
- .ndo_change_mtu = hinic_change_mtu,
- .ndo_set_mac_address = hinic_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ hinic_flush_txqs(netdev);
+ hinic_flush_sq_res(nic_dev->hwdev);
+		/* 100ms after the vport is disabled,
+		 * no packets will be sent to the host
+		 */
+ msleep(100);
+ }
+
+ hinic_qps_irq_deinit(nic_dev);
+ hinic_remove_configure(nic_dev);
+
+ if (hinic_get_chip_present_flag(nic_dev->hwdev))
+ hinic_free_qp_ctxts(nic_dev->hwdev);
+
+ mutex_lock(&nic_dev->nic_mutex);
+ hinic_free_all_rx_resources(netdev);
+
+ hinic_free_all_tx_resources(netdev);
+
+ hinic_free_qps(nic_dev->hwdev);
+
+ hinic_destroy_num_qps(nic_dev);
+ mutex_unlock(&nic_dev->nic_mutex);
+
+ nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n");
+
+ return 0;
+}
+
+static inline u32 calc_toeplitz_rss(u32 sip, u32 dip, u32 sport, u32 dport,
+ const u32 *rss_key)
+{
+ u32 i, port, rss = 0;
+
+ port = (sport << 16) | dport;
+
+ /* The key - SIP, DIP, SPORT, DPORT */
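+	/* Toeplitz: every set input bit i XORs in the 32-bit key window
+	 * at bit offset i, assembled from two adjacent key words
+	 */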
+ for (i = 0; i < 32; i++)
+ if (sip & ((u32)1 << (u32)(31 - i)))
+ rss ^= (rss_key[0] << i) |
+ (u32)((u64)rss_key[1] >> (32 - i));
+
+ for (i = 0; i < 32; i++)
+ if (dip & ((u32)1 << (u32)(31 - i)))
+ rss ^= (rss_key[1] << i) |
+ (u32)((u64)rss_key[2] >> (32 - i));
+
+ for (i = 0; i < 32; i++)
+ if (port & ((u32)1 << (u32)(31 - i)))
+ rss ^= (rss_key[2] << i) |
+ (u32)((u64)rss_key[3] >> (32 - i));
+
+ return rss;
+}
+
+static u16 select_queue_by_toeplitz(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned int num_tx_queues)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(dev);
+ struct tcphdr *tcphdr;
+ struct iphdr *iphdr;
+ u32 hash = 0;
+
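+	/* keep tx on the queue the flow was received on, when known */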
+ if (skb_rx_queue_recorded(skb)) {
+ hash = skb_get_rx_queue(skb);
+ while (unlikely(hash >= num_tx_queues))
+ hash -= num_tx_queues;
+ return (u16)hash;
+ }
+
+ /*lint -save -e778*/
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
+ iphdr = ip_hdr(skb);
+ if (iphdr->protocol == IPPROTO_UDP ||
+ iphdr->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ hash = calc_toeplitz_rss(ntohl(iphdr->daddr),
+ ntohl(iphdr->saddr),
+ ntohs(tcphdr->dest),
+ ntohs(tcphdr->source),
+ nic_dev->rss_hkey_user_be);
+ }
+ }
+ /*lint -restore*/
+
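+	/* the low 8 bits of the hash index the 256-entry indirection table */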
+ return (u16)nic_dev->rss_indir_user[hash & 0xFF];
+}
+
+#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK)
+#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV)
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+#else
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ __always_unused void *accel,
+ select_queue_fallback_t fallback)
+#endif
+
+#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL)
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ __always_unused void *accel)
+
+#else
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb)
+#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (skb->vlan_tci && !skb->priority)
+ skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ if (netdev_get_num_tc(netdev) || !nic_dev->rss_hkey_user_be)
+ goto fallback;
+
+ if (nic_dev->rss_hash_engine == HINIC_RSS_HASH_ENGINE_TYPE_TOEP &&
+ test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
+ return select_queue_by_toeplitz(netdev, skb,
+ netdev->real_num_tx_queues);
+
+fallback:
+
+#ifndef HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+ return skb_tx_hash(netdev, skb);
+#else
+#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV
+ return fallback(netdev, skb, sb_dev);
+#else
+ return fallback(netdev, skb);
+#endif
+#endif
+}
+
+#ifdef HAVE_NDO_GET_STATS64
+#ifdef HAVE_VOID_NDO_GET_STATS64
+static void hinic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+#else
+static struct rtnl_link_stats64
+ *hinic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+#endif
+
+#else /* !HAVE_NDO_GET_STATS64 */
+static struct net_device_stats *hinic_get_stats(struct net_device *netdev)
+#endif
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+#ifndef HAVE_NDO_GET_STATS64
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *stats = &netdev->stats;
+#else
+ struct net_device_stats *stats = &nic_dev->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+#endif /* HAVE_NDO_GET_STATS64 */
+ struct hinic_txq_stats *txq_stats;
+ struct hinic_rxq_stats *rxq_stats;
+ struct hinic_txq *txq;
+ struct hinic_rxq *rxq;
+ u64 bytes, packets, dropped, errors;
+ unsigned int start;
+ int i;
+
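+	/* fold per-queue counters; each queue is read under its
+	 * u64_stats seqcount so the totals stay consistent
+	 */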
+ bytes = 0;
+ packets = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->txqs)
+ break;
+
+ txq = &nic_dev->txqs[i];
+ txq_stats = &txq->txq_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ bytes += txq_stats->bytes;
+ packets += txq_stats->packets;
+ dropped += txq_stats->dropped;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ }
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = dropped;
+
+ bytes = 0;
+ packets = 0;
+ errors = 0;
+ dropped = 0;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ rxq = &nic_dev->rxqs[i];
+ rxq_stats = &rxq->rxq_stats;
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ bytes += rxq_stats->bytes;
+ packets += rxq_stats->packets;
+ errors += rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ dropped += rxq_stats->dropped;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ }
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->rx_errors = errors;
+ stats->rx_dropped = dropped;
+
+#ifndef HAVE_VOID_NDO_GET_STATS64
+ return stats;
+#endif
+}
+
+static void hinic_tx_timeout(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout);
+ nicif_err(nic_dev, drv, netdev, "Tx timeout\n");
+}
+
+static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u32 mtu = (u32)new_mtu;
+ int err = 0;
+
+ err = hinic_set_port_mtu(nic_dev->hwdev, mtu);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ } else {
+ nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = mtu;
+ }
+
+ return err;
+}
+
+static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!FUNC_SUPPORT_CHANGE_MAC(nic_dev->hwdev)) {
+ nicif_warn(nic_dev, drv, netdev,
+ "Current function don't support to set mac\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) {
+ nicif_info(nic_dev, drv, netdev,
+ "Already using mac address %pM\n",
+ saddr->sa_data);
+ return 0;
+ }
+
+ err = hinic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data,
+ 0, hinic_global_func_id(nic_dev->hwdev));
+ if (err)
+ return err;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, ETH_ALEN);
+
+ nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n",
+ saddr->sa_data);
+
+	/* TODO: on kernels newer than 4.7 the mac address of vlan
+	 * devices on top of this netdev must be updated as well
+	 * (the kernel does not update the vlan device's mac)
+	 */
+
+ return 0;
+}
+
+static int
+hinic_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto,
+ u16 vid)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u16 func_id = hinic_global_func_id(nic_dev->hwdev);
+ u32 col, line;
+ int err;
+
+ col = VID_COL(nic_dev, vid);
+ line = VID_LINE(nic_dev, vid);
+
+ err = hinic_add_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to add vlan%d\n", vid);
+ goto end;
+ }
+
+ set_bit(col, &vlan_bitmap[line]);
+
+ nicif_info(nic_dev, drv, netdev, "Add vlan %d\n", vid);
+
+end:
+ return err;
+}
+
+static int
+hinic_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto,
+ u16 vid)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+ u16 func_id = hinic_global_func_id(nic_dev->hwdev);
+ int err, col, line;
+
+ col = VID_COL(nic_dev, vid);
+ line = VID_LINE(nic_dev, vid);
+
+ err = hinic_del_vlan(nic_dev->hwdev, vid, func_id);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
+ goto end;
+ }
+
+ clear_bit(col, &vlan_bitmap[line]);
+
+ nicif_info(nic_dev, drv, netdev, "Remove vlan %d\n", vid);
+
+end:
+ return err;
+}
+
+static int set_features(struct hinic_nic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change)
+{
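+ /* When force_change is set, every feature bit is treated as changed
+ * so the whole configuration is pushed to hardware (used to apply
+ * the defaults at probe time).
+ */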
+ netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ u8 rxvlan_changed = !!(changed & NETIF_F_HW_VLAN_CTAG_RX);
+ u8 rxvlan_en = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
+ u8 rxvlan_changed = !!(changed & NETIF_F_HW_VLAN_RX);
+ u8 rxvlan_en = !!(features & NETIF_F_HW_VLAN_RX);
+#endif
+ u32 lro_timer, lro_buf_size;
+ int err = 0;
+
+ if (changed & NETIF_F_TSO) {
+ err = hinic_set_tx_tso(nic_dev->hwdev,
+ !!(features & NETIF_F_TSO));
+ hinic_info(nic_dev, drv, "%s tso %s\n",
+ (features & NETIF_F_TSO) ? "Enable" : "Disable",
+ err ? "failed" : "success");
+ }
+
+ if (rxvlan_changed) {
+ err = hinic_set_rx_vlan_offload(nic_dev->hwdev, rxvlan_en);
+ hinic_info(nic_dev, drv, "%s rxvlan %s\n",
+ rxvlan_en ? "Enable" : "Disable",
+ err ? "failed" : "success");
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ /* hw should always enable rx csum */
+ u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+
+ err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+ hinic_info(nic_dev, drv, "%s rx csum %s\n",
+ (features & NETIF_F_RXCSUM) ? "Enable" : "Disable",
+ err ? "failed" : "success");
+ }
+
+ if (changed & NETIF_F_LRO) {
+ lro_timer = nic_dev->adaptive_cfg.lro.timer;
+ lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size;
+
+ err = hinic_set_rx_lro_state(nic_dev->hwdev,
+ !!(features & NETIF_F_LRO),
+ lro_timer,
+ lro_buf_size /
+ nic_dev->rx_buff_len);
+ hinic_info(nic_dev, drv, "%s lro %s\n",
+ (features & NETIF_F_LRO) ? "Enable" : "Disable",
+ err ? "failed" : "success");
+ }
+
+ return err;
+}
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int hinic_set_features(struct net_device *netdev, u32 features)
+#else
+static int hinic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+#endif
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return set_features(nic_dev, nic_dev->netdev->features,
+ features, false);
+}
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static u32 hinic_fix_features(struct net_device *netdev, u32 features)
+#else
+static netdev_features_t hinic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+#endif
+{
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
+{
+ int err;
+
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ if (FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
+ err = hinic_dcb_reset_hw_config(nic_dev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb
configuration\n");
+ return -EFAULT;
+ }
+ }
+
+ if (FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ err = hinic_reset_port_link_cfg(nic_dev->hwdev);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (enable_bp) {
+ nic_dev->bp_upper_thd = (u16)bp_upper_thd;
+ nic_dev->bp_lower_thd = (u16)bp_lower_thd;
+ err = hinic_set_bp_thd(nic_dev->hwdev,
+ nic_dev->bp_lower_thd);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev,
+ "Failed to set bp lower threshold\n");
+ return -EFAULT;
+ }
+
+ set_bit(HINIC_BP_ENABLE, &nic_dev->flags);
+ } else {
+ err = hinic_disable_fw_bp(nic_dev->hwdev);
+ if (err)
+ return -EFAULT;
+
+ clear_bit(HINIC_BP_ENABLE, &nic_dev->flags);
+ }
+
+ hinic_set_anti_attack(nic_dev->hwdev, true);
+
+ if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX &&
+ FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ err = hinic_set_link_status_follow(nic_dev->hwdev,
+ set_link_status_follow);
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+ nic_warn(&nic_dev->pdev->dev,
+ "Current version of firmware don't support to set link status follow port
status\n");
+ }
+ }
+
+ /* enable all hw features in netdev->features */
+ return set_features(nic_dev, 0, nic_dev->netdev->features, true);
+}
+
+#ifdef NETIF_F_HW_TC
+#ifdef TC_MQPRIO_HW_OFFLOAD_MAX
+static int hinic_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt *mqprio)
+{
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return hinic_setup_tc(dev, mqprio->num_tc);
+}
+#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */
+
+#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV)
+static int __hinic_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX)
+static int __hinic_setup_tc(struct net_device *dev, __always_unused u32 handle,
+ u32 chain_index, __always_unused __be16 proto,
+ struct tc_to_netdev *tc)
+#else
+static int __hinic_setup_tc(struct net_device *dev, __always_unused u32 handle,
+ __always_unused __be16 proto,
+ struct tc_to_netdev *tc)
+#endif
+{
+#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+ unsigned int type = tc->type;
+
+#ifdef HAVE_NDO_SETUP_TC_CHAIN_INDEX
+ if (chain_index)
+ return -EOPNOTSUPP;
+
+#endif
+#endif
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV)
+ return hinic_setup_tc_mqprio(dev, type_data);
+#elif defined(TC_MQPRIO_HW_OFFLOAD_MAX)
+ return hinic_setup_tc_mqprio(dev, tc->mqprio);
+#else
+ return hinic_setup_tc(dev, tc->tc);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* NETIF_F_HW_TC */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void hinic_netpoll(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qps; i++)
+ napi_schedule(&nic_dev->irq_cfg[i].napi);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int hinic_uc_sync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return hinic_set_mac(nic_dev->hwdev, addr, 0,
+ hinic_global_func_id(nic_dev->hwdev));
+}
+
+static int hinic_uc_unsync(struct net_device *netdev, u8 *addr)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ /* The addr is in use */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ return hinic_del_mac(nic_dev->hwdev, addr, 0,
+ hinic_global_func_id(nic_dev->hwdev));
+}
+
+static void hinic_clean_mac_list_filter(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ struct hinic_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) {
+ if (f->state == HINIC_MAC_HW_SYNCED)
+ hinic_uc_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) {
+ if (f->state == HINIC_MAC_HW_SYNCED)
+ hinic_uc_unsync(netdev, f->addr);
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
+static struct hinic_mac_filter *hinic_find_mac(struct list_head *filter_list,
+ u8 *addr)
+{
+ struct hinic_mac_filter *f;
+
+ list_for_each_entry(f, filter_list, list) {
+ if (ether_addr_equal(addr, f->addr))
+ return f;
+ }
+ return NULL;
+}
+
+static struct hinic_mac_filter
+ *hinic_add_filter(struct hinic_nic_dev *nic_dev,
+ struct list_head *mac_filter_list, u8 *addr)
+{
+ struct hinic_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ goto out;
+
+ memcpy(f->addr, addr, ETH_ALEN);
+
+ INIT_LIST_HEAD(&f->list);
+ list_add_tail(&f->list, mac_filter_list);
+
+ f->state = HINIC_MAC_WAIT_HW_SYNC;
+ set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+out:
+ return f;
+}
+
+static void hinic_del_filter(struct hinic_nic_dev *nic_dev,
+ struct hinic_mac_filter *f)
+{
+ set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+ if (f->state == HINIC_MAC_WAIT_HW_SYNC) {
+ /* have not added to hw, delete it directly */
+ list_del(&f->list);
+ kfree(f);
+ return;
+ }
+
+ f->state = HINIC_MAC_WAIT_HW_UNSYNC;
+}
+
+static struct hinic_mac_filter
+ *hinic_mac_filter_entry_clone(struct hinic_mac_filter *src)
+{
+ struct hinic_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+
+ *f = *src;
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+static void hinic_undo_del_filter_entries(struct list_head *filter_list,
+ struct list_head *from)
+{
+ struct hinic_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ if (hinic_find_mac(filter_list, f->addr))
+ continue;
+
+ if (f->state == HINIC_MAC_HW_SYNCED)
+ f->state = HINIC_MAC_WAIT_HW_UNSYNC;
+
+ list_move_tail(&f->list, filter_list);
+ }
+}
+
+static void hinic_undo_add_filter_entries(struct list_head *filter_list,
+ struct list_head *from)
+{
+ struct hinic_mac_filter *f, *ftmp, *tmp;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ tmp = hinic_find_mac(filter_list, f->addr);
+ if (tmp && tmp->state == HINIC_MAC_HW_SYNCED)
+ tmp->state = HINIC_MAC_WAIT_HW_SYNC;
+ }
+}
+
+static void hinic_cleanup_filter_list(struct list_head *head)
+{
+ struct hinic_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, head, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
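+/* Push the pending delete/add lists to hardware. Delete failures are
+ * ignored; an add failure aborts and returns the (negative) error. On
+ * success the number of added mac addresses is returned.
+ */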
+static int hinic_mac_filter_sync_hw(struct hinic_nic_dev *nic_dev,
+ struct list_head *del_list,
+ struct list_head *add_list)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ struct hinic_mac_filter *f, *ftmp;
+ int err = 0, add_count = 0;
+
+ if (!list_empty(del_list)) {
+ list_for_each_entry_safe(f, ftmp, del_list, list) {
+ err = hinic_uc_unsync(netdev, f->addr);
+ if (err) { /* ignore errors when deleting mac */
+ nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n");
+ }
+
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+
+ if (!list_empty(add_list)) {
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ err = hinic_uc_sync(netdev, f->addr);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to add mac\n");
+ return err;
+ }
+
+ add_count++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+
+ return add_count;
+}
+
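+/* Reconcile the software filter list with hardware: entries marked
+ * WAIT_HW_UNSYNC are moved onto a temporary delete list, and entries
+ * marked WAIT_HW_SYNC are cloned onto a temporary add list so the
+ * originals can be rolled back if programming the hardware fails.
+ */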
+static int hinic_mac_filter_sync(struct hinic_nic_dev *nic_dev,
+ struct list_head *mac_filter_list, bool uc)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ struct list_head tmp_del_list, tmp_add_list;
+ struct hinic_mac_filter *f, *ftmp, *fclone;
+ int err = 0, add_count = 0;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC_MAC_WAIT_HW_UNSYNC)
+ continue;
+
+ f->state = HINIC_MAC_HW_UNSYNCED;
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC_MAC_WAIT_HW_SYNC)
+ continue;
+
+ fclone = hinic_mac_filter_entry_clone(f);
+ if (!fclone) {
+ err = -ENOMEM;
+ break;
+ }
+
+ f->state = HINIC_MAC_HW_SYNCED;
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ if (err) {
+ hinic_undo_del_filter_entries(mac_filter_list, &tmp_del_list);
+ hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+ nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n");
+ hinic_cleanup_filter_list(&tmp_del_list);
+ hinic_cleanup_filter_list(&tmp_add_list);
+ return -ENOMEM;
+ }
+
+ add_count =
+ hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+ if (list_empty(&tmp_add_list))
+ return add_count;
+
+ /* errors occurred while adding macs to hw; delete all macs from hw */
+ hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+ /* VFs don't support promiscuous mode,
+ * so we can't delete any other uc mac
+ */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
+ list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+ if (f->state != HINIC_MAC_HW_SYNCED)
+ continue;
+
+ fclone = hinic_mac_filter_entry_clone(f);
+ if (!fclone)
+ break;
+
+ f->state = HINIC_MAC_WAIT_HW_SYNC;
+ list_add_tail(&fclone->list, &tmp_del_list);
+ }
+ }
+
+ hinic_cleanup_filter_list(&tmp_add_list);
+ hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+
+ /* need to enter promisc/allmulti mode */
+ return -ENOMEM;
+}
+
+static void hinic_mac_filter_sync_all(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int add_count;
+
+ if (test_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+ clear_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+ add_count = hinic_mac_filter_sync(nic_dev,
+ &nic_dev->uc_filter_list,
+ true);
+ if (add_count < 0 && !HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ set_bit(HINIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state);
+ nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n");
+ } else if (add_count) {
+ clear_bit(HINIC_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ }
+
+ add_count = hinic_mac_filter_sync(nic_dev,
+ &nic_dev->mc_filter_list,
+ false);
+ if (add_count < 0) {
+ set_bit(HINIC_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n");
+ } else if (add_count) {
+ clear_bit(HINIC_ALLMULTI_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ }
+ }
+}
+
+#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \
+ HINIC_RX_MODE_BC)
+
+static void hinic_update_mac_filter(struct hinic_nic_dev *nic_dev,
+ struct netdev_hw_addr_list *src_list,
+ struct list_head *filter_list)
+{
+ struct netdev_hw_addr *ha;
+ struct hinic_mac_filter *f, *ftmp, *filter;
+
+ /* add addr if not already in the filter list */
+ netif_addr_lock_bh(nic_dev->netdev);
+ netdev_hw_addr_list_for_each(ha, src_list) {
+ filter = hinic_find_mac(filter_list, ha->addr);
+ if (!filter)
+ hinic_add_filter(nic_dev, filter_list, ha->addr);
+ else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC)
+ filter->state = HINIC_MAC_HW_SYNCED;
+ }
+ netif_addr_unlock_bh(nic_dev->netdev);
+
+ /* delete addr if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, filter_list, list) {
+ bool found = false;
+
+ netif_addr_lock_bh(nic_dev->netdev);
+ netdev_hw_addr_list_for_each(ha, src_list)
+ if (ether_addr_equal(ha->addr, f->addr)) {
+ found = true;
+ break;
+ }
+ netif_addr_unlock_bh(nic_dev->netdev);
+
+ if (found)
+ continue;
+
+ hinic_del_filter(nic_dev, f);
+ }
+}
+
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+static void hinic_update_mc_filter(struct hinic_nic_dev *nic_dev,
+ struct list_head *filter_list)
+{
+ struct dev_mc_list *ha;
+ struct hinic_mac_filter *f, *ftmp, *filter;
+
+ /* add addr if not already in the filter list */
+ netif_addr_lock_bh(nic_dev->netdev);
+ netdev_for_each_mc_addr(ha, nic_dev->netdev) {
+ filter = hinic_find_mac(filter_list, ha->da_addr);
+ if (!filter)
+ hinic_add_filter(nic_dev, filter_list, ha->da_addr);
+ else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC)
+ filter->state = HINIC_MAC_HW_SYNCED;
+ }
+ netif_addr_unlock_bh(nic_dev->netdev);
+ /* delete addr if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, filter_list, list) {
+ bool found = false;
+
+ netif_addr_lock_bh(nic_dev->netdev);
+ netdev_for_each_mc_addr(ha, nic_dev->netdev)
+ if (ether_addr_equal(ha->da_addr, f->addr)) {
+ found = true;
+ break;
+ }
+ netif_addr_unlock_bh(nic_dev->netdev);
+
+ if (found)
+ continue;
+
+ hinic_del_filter(nic_dev, f);
+ }
+}
+#endif
+
+static void __update_mac_filter(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+
+ if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
+ netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
+ nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
+ nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
+
+ hinic_update_mac_filter(nic_dev, &netdev->uc,
+ &nic_dev->uc_filter_list);
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ hinic_update_mac_filter(nic_dev, &netdev->mc,
+ &nic_dev->mc_filter_list);
+#else
+ hinic_update_mc_filter(nic_dev, &nic_dev->mc_filter_list);
+#endif
+ }
+}
+
+static void hinic_set_rx_mode_work(struct work_struct *work)
+{
+ struct hinic_nic_dev *nic_dev =
+ container_of(work, struct hinic_nic_dev, rx_mode_work);
+ struct net_device *netdev = nic_dev->netdev;
+ int promisc_en = 0, allmulti_en = 0;
+ int err = 0;
+
+ __update_mac_filter(nic_dev);
+
+ hinic_mac_filter_sync_all(nic_dev);
+
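+ /* A failed filter sync may have forced promisc/allmulti on; those
+ * forced bits are folded into the mode decision below together with
+ * the netdev flags.
+ */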
+ /* VFs don't support promiscuous mode */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ promisc_en = !!(netdev->flags & IFF_PROMISC) ||
+ test_bit(HINIC_PROMISC_FORCE_ON,
+ &nic_dev->rx_mod_state);
+ }
+
+ allmulti_en = !!(netdev->flags & IFF_ALLMULTI) ||
+ test_bit(HINIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state);
+
+ if (promisc_en !=
+ test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) ||
+ allmulti_en !=
+ test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) {
+ enum hinic_rx_mod rx_mod = HINIC_DEFAULT_RX_MODE;
+
+ rx_mod |= (promisc_en ? HINIC_RX_MODE_PROMISC : 0);
+ rx_mod |= (allmulti_en ? HINIC_RX_MODE_MC_ALL : 0);
+
+ /* FOR DEBUG */
+ if (promisc_en !=
+ test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state))
+ nicif_info(nic_dev, drv, netdev,
+ "%s promisc mode\n",
+ promisc_en ? "Enter" : "Left");
+ if (allmulti_en !=
+ test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state))
+ nicif_info(nic_dev, drv, netdev,
+ "%s all_multi mode\n",
+ allmulti_en ? "Enter" : "Left");
+
+ err = hinic_set_rx_mode(nic_dev->hwdev, rx_mod);
+ if (!err) {
+ promisc_en ?
+ set_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state);
+
+ allmulti_en ?
+ set_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) :
+ clear_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state);
+ } else {
+ nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n");
+ }
+ }
+}
+
+static void hinic_nic_set_rx_mode(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev))
+ queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
+}
+
+static const struct net_device_ops hinic_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_start_xmit = hinic_xmit_frame,
+
+#ifdef HAVE_NDO_GET_STATS64
+ .ndo_get_stats64 = hinic_get_stats64,
+#else
+ .ndo_get_stats = hinic_get_stats,
+#endif /* HAVE_NDO_GET_STATS64 */
+
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_select_queue = hinic_select_queue,
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU
+ .extended.ndo_change_mtu = hinic_change_mtu,
+#else
+ .ndo_change_mtu = hinic_change_mtu,
+#endif
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
- .ndo_set_rx_mode = hinic_set_rx_mode,
+#endif
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+ /* RHEL7 requires this to be defined to enable extended ops. RHEL7
+ * uses the function get_ndo_ext to retrieve offsets for extended
+ * fields from within the net_device_ops struct and ndo_size is
+ * checked to determine whether or not the offset is valid.
+ */
+ .ndo_size = sizeof(const struct net_device_ops),
+#endif
+#ifdef IFLA_VF_MAX
+ .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+ .extended.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+#else
+ .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+#else
+ .ndo_set_vf_tx_rate = hinic_ndo_set_vf_bw,
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+
+ .ndo_get_vf_config = hinic_ndo_get_vf_config,
+#endif
+
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+ .extended.ndo_setup_tc_rh = __hinic_setup_tc,
+#else
+#ifdef HAVE_SETUP_TC
+#ifdef NETIF_F_HW_TC
+ .ndo_setup_tc = __hinic_setup_tc,
+#else
+ .ndo_setup_tc = hinic_setup_tc,
+#endif /* NETIF_F_HW_TC */
+#endif /* HAVE_SETUP_TC */
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = hinic_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+ .ndo_set_rx_mode = hinic_nic_set_rx_mode,
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext hinic_netdev_ops_ext = {
+ .size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+#endif
+
+#ifdef HAVE_NDO_SET_FEATURES
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+};
+
+static const struct net_device_ops hinicvf_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
.ndo_start_xmit = hinic_xmit_frame,
+
+#ifdef HAVE_NDO_GET_STATS64
+ .ndo_get_stats64 = hinic_get_stats64,
+#else
+ .ndo_get_stats = hinic_get_stats,
+#endif /* HAVE_NDO_GET_STATS64 */
+
.ndo_tx_timeout = hinic_tx_timeout,
- .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_select_queue = hinic_select_queue,
+
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+ /* RHEL7 requires this to be defined to enable extended ops. RHEL7
+ * uses the function get_ndo_ext to retrieve offsets for extended
+ * fields from within the net_device_ops struct and ndo_size is
+ * checked to determine whether or not the offset is valid.
+ */
+ .ndo_size = sizeof(const struct net_device_ops),
+#endif
+
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU
+ .extended.ndo_change_mtu = hinic_change_mtu,
+#else
+ .ndo_change_mtu = hinic_change_mtu,
+#endif
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = hinic_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+ .ndo_set_rx_mode = hinic_nic_set_rx_mode,
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext hinicvf_netdev_ops_ext = {
+ .size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+
+#ifdef HAVE_NDO_SET_FEATURES
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
};
-static void netdev_features_init(struct net_device *netdev)
-{
- netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+#ifdef HAVE_NDO_SET_FEATURES
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ netdev_features_t hw_features;
+#else
+ u32 hw_features;
+#endif
+#endif
+
+ netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 | NETIF_F_RXCSUM;
+
+ if (FUNC_SUPPORT_SCTP_CRC(nic_dev->hwdev))
+ netdev->features |= NETIF_F_SCTP_CRC;
+
+ netdev->vlan_features = netdev->features;
+
+#ifdef HAVE_ENCAPSULATION_TSO
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+#endif /* HAVE_ENCAPSULATION_TSO */
+
+ if (FUNC_SUPPORT_HW_VLAN(nic_dev->hwdev)) {
+#if defined(NETIF_F_HW_VLAN_CTAG_TX)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+#elif defined(NETIF_F_HW_VLAN_TX)
+ netdev->features |= NETIF_F_HW_VLAN_TX;
+#endif
+
+#if defined(NETIF_F_HW_VLAN_CTAG_RX)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+#elif defined(NETIF_F_HW_VLAN_RX)
+ netdev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+ }
+
+#ifdef HAVE_NDO_SET_FEATURES
+ /* copy netdev features into list of user selectable features */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ hw_features = get_netdev_hw_features(netdev);
+#else
+ hw_features = netdev->hw_features;
+#endif
+ hw_features |= netdev->features;
+#endif
+ if (FUNC_SUPPORT_LRO(nic_dev->hwdev)) {
+ /* LRO is disabled by default; only set hw features */
+ hw_features |= NETIF_F_LRO;
+
+ /* Enable LRO */
+ if (nic_dev->adaptive_cfg.lro.enable &&
+ !HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ netdev->features |= NETIF_F_LRO;
+ }
+
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_hw_features(netdev, hw_features);
+#else
+ netdev->hw_features = hw_features;
+#endif
+#endif
+
+/* Set after hw_features because this must not be part of hw_features */
+#if defined(NETIF_F_HW_VLAN_CTAG_FILTER)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#elif defined(NETIF_F_HW_VLAN_FILTER)
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
+
+#ifdef IFF_UNICAST_FLT
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+
+#ifdef HAVE_ENCAPSULATION_CSUM
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SCTP_CRC | NETIF_F_SG
+#ifdef HAVE_ENCAPSULATION_TSO
+ | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN
+ | NETIF_F_GSO_UDP_TUNNEL_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL;
+
+#endif /* HAVE_ENCAPSULATION_TSO */
+#endif /* HAVE_ENCAPSULATION_CSUM */
+}
+
+#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) { \
+ if ((num_qps) > (nic_dev)->max_qps) \
+ nic_warn(&nic_dev->pdev->dev, \
+ "Module Parameter %s value %d is out of range, "\
+ "Maximum value for the device: %d, using %d\n",\
+ #num_qps, num_qps, (nic_dev)->max_qps, \
+ (nic_dev)->max_qps); \
+ if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \
+ out_qps = (nic_dev)->max_qps; \
+ else \
+ out_qps = num_qps; \
+}
+
+static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev)
+{
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ int i, node, err = 0;
+ u16 num_cpus = 0;
+
+ nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
+ if (nic_dev->max_qps <= 1) {
+ clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev,
+ "Failed to alloc tmpl_idx for rss, can't enable rss for this
function\n");
+ clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->max_qps = 1;
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+
+ nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
+
+ MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps);
+
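+ /* Prefer CPUs on the device's NUMA node; if none are found, fall
+ * back to all online CPUs, then cap the queue count so that each
+ * queue can stay CPU-local.
+ */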
+ for (i = 0; i < (int)num_online_cpus(); i++) {
+ node = (int)cpu_to_node(i);
+ if (node == dev_to_node(&nic_dev->pdev->dev))
+ num_cpus++;
+ }
+
+ if (!num_cpus)
+ num_cpus = (u16)num_online_cpus();
+
+ nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
+
+ nic_dev->rss_limit = nic_dev->num_qps;
+ nic_dev->num_rss = nic_dev->num_qps;
+
+ hinic_init_rss_parameters(nic_dev->netdev);
+ hinic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc);
+}
+
+static int hinic_sw_init(struct hinic_nic_dev *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ sema_init(&adapter->port_state_sem, 1);
+
+ err = hinic_dcb_init(adapter);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to init dcb\n");
+ return -EFAULT;
+ }
+
+ if (HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ err = hinic_sq_cos_mapping(netdev);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to set sq_cos_mapping\n");
+ return -EFAULT;
+ }
+ }
+
+ adapter->sq_depth = HINIC_SQ_DEPTH;
+ adapter->rq_depth = HINIC_RQ_DEPTH;
+
+ hinic_try_to_enable_rss(adapter);
+
+ err = hinic_get_default_mac(adapter->hwdev, netdev->dev_addr);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to get MAC address\n");
+ goto get_mac_err;
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ nic_err(&adapter->pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_mac;
+ }
+
+ nic_info(&adapter->pdev->dev, "Invalid MAC address %pM, using
random\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
+
+ err = hinic_set_mac(adapter->hwdev, netdev->dev_addr, 0,
+ hinic_global_func_id(adapter->hwdev));
+ /* For a VF driver, the PF may have already set the VF's MAC
+ * address; that condition must not be treated as an error during
+ * the driver probe procedure.
+ */
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
+ nic_err(&adapter->pdev->dev, "Failed to set default MAC\n");
+ goto set_mac_err;
+ }
+
+ /* MTU range: 256 - 9600 */
+#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
+ netdev->min_mtu = HINIC_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE;
+#endif
- netdev->vlan_features = netdev->hw_features;
+#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU
+ netdev->extended->min_mtu = HINIC_MIN_MTU_SIZE;
+ netdev->extended->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE;
+#endif
+ return 0;
+
+set_mac_err:
+err_mac:
+get_mac_err:
+ if (test_bit(HINIC_RSS_ENABLE, &adapter->flags))
+ hinic_rss_template_free(adapter->hwdev, adapter->rss_tmpl_idx);
- netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ return err;
}
-/**
- * link_status_event_handler - link event handler
- * @handle: nic device for the handler
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_in: output buffer
- * @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
- **/
-static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+static void hinic_assign_netdev_ops(struct hinic_nic_dev *adapter)
{
- struct hinic_port_link_status *link_status, *ret_link_status;
- struct hinic_dev *nic_dev = handle;
+ if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ adapter->netdev->netdev_ops = &hinic_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_ops_ext(adapter->netdev, &hinic_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+ if (FUNC_SUPPORT_DCB(adapter->hwdev))
+ adapter->netdev->dcbnl_ops = &hinic_dcbnl_ops;
+ /* Will implement hinic_set_ethtool_ops() in next patch. */
+ } else {
+ adapter->netdev->netdev_ops = &hinicvf_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_ops_ext(adapter->netdev, &hinicvf_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+ /* Will implement hinic_set_ethtool_ops() in next patch. */
+ }
+ adapter->netdev->watchdog_timeo = 5 * HZ;
+}
- link_status = buf_in;
+#define HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT 1
+#define HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER 1
+#define HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER 2
+#define HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER 3
+#define HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER 2
+#define HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER 3
- if (link_status->link == HINIC_LINK_STATE_UP) {
- down(&nic_dev->mgmt_lock);
+static void init_intr_coal_param(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_intr_coal_info *info;
+ struct pci_device_id *id;
+ u16 i;
+
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_10GE:
+ case HINIC_BOARD_PG_TP_10GE:
+ nic_dev->his_link_speed = SPEED_10000;
+ break;
+ case HINIC_BOARD_25GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ nic_dev->his_link_speed = SPEED_25000;
+ break;
+ case HINIC_BOARD_40GE:
+ nic_dev->his_link_speed = SPEED_40000;
+ break;
+ case HINIC_BOARD_100GE:
+ case HINIC_BOARD_PG_100GE:
+ nic_dev->his_link_speed = SPEED_100000;
+ break;
+ default:
+ break;
+ }
- nic_dev->flags |= HINIC_LINK_UP;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ info = &nic_dev->intr_coalesce[i];
+ if (!nic_dev->intr_coal_set_flag) {
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ info->pending_limt =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ case HINIC_BOARD_PG_SM_25GE:
+ info->pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ case HINIC_BOARD_PG_100GE:
+ info->pending_limt =
+ HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ default:
+ info->pending_limt = qp_pending_limit;
+ info->coalesce_timer_cfg = qp_coalesc_timer_cfg;
+ break;
+ }
+ }
- if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
- (HINIC_LINK_UP | HINIC_INTF_UP)) {
- netif_carrier_on(nic_dev->netdev);
- netif_tx_wake_all_queues(nic_dev->netdev);
+ info->resend_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ info->pkt_rate_high = HINIC_RX_RATE_HIGH;
+ info->rx_usecs_high = HINIC_RX_COAL_TIME_HIGH;
+ info->rx_pending_limt_high = HINIC_RX_PENDING_LIMIT_HIGH;
+ info->pkt_rate_low = HINIC_RX_RATE_LOW;
+ info->rx_usecs_low = HINIC_RX_COAL_TIME_LOW;
+ info->rx_pending_limt_low = HINIC_RX_PENDING_LIMIT_LOW;
+
+ if (nic_dev->in_vm) {
+ info->pkt_rate_low = HINIC_RX_RATE_LOW_VM;
+ info->rx_pending_limt_high =
+ HINIC_RX_PENDING_LIMIT_HIGH_VM;
}
+ }
+}
+
+static int hinic_init_intr_coalesce(struct hinic_nic_dev *nic_dev)
+{
+ u64 size;
+
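+ /* Record whether the user changed the coalescing module parameters;
+ * when set, init_intr_coal_param() skips the per-board defaults.
+ */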
+ if (qp_pending_limit != HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT ||
+ qp_coalesc_timer_cfg != HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG)
+ nic_dev->intr_coal_set_flag = 1;
+ else
+ nic_dev->intr_coal_set_flag = 0;
+
+ size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps;
+ if (!size) {
+ nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr
coalesce\n");
+ return -EINVAL;
+ }
+ nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL);
+ if (!nic_dev->intr_coalesce) {
+ nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n");
+ return -ENOMEM;
+ }
+
+ init_intr_coal_param(nic_dev);
+
+ if (test_bit(HINIC_INTR_ADAPT, &nic_dev->flags))
+ nic_dev->adaptive_rx_coal = 1;
+ else
+ nic_dev->adaptive_rx_coal = 0;
+
+ return 0;
+}
+
+static void hinic_free_intr_coalesce(struct hinic_nic_dev *nic_dev)
+{
+ kfree(nic_dev->intr_coalesce);
+}
+
+static int hinic_alloc_qps(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int err;
+
+ err = hinic_alloc_txqs(netdev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic_alloc_rxqs(netdev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n");
+ goto alloc_rxqs_err;
+ }
+
+ err = hinic_init_intr_coalesce(nic_dev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n");
+ goto init_intr_err;
+ }
+
+ return 0;
- up(&nic_dev->mgmt_lock);
+init_intr_err:
+ hinic_free_rxqs(netdev);
- netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
+alloc_rxqs_err:
+ hinic_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic_destroy_qps(struct hinic_nic_dev *nic_dev)
+{
+ hinic_free_intr_coalesce(nic_dev);
+ hinic_free_rxqs(nic_dev->netdev);
+ hinic_free_txqs(nic_dev->netdev);
+}
+
+static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev)
+{
+ struct pci_dev *pdev = lld_dev->pdev;
+
+ if (bp_upper_thd < bp_lower_thd || bp_lower_thd == 0) {
+ nic_warn(&pdev->dev, "Module Parameter bp_upper_thd: %d, bp_lower_thd: %d
is invalid, resetting to default\n",
+ bp_upper_thd, bp_lower_thd);
+ bp_lower_thd = HINIC_RX_BP_LOWER_THD;
+ bp_upper_thd = HINIC_RX_BP_UPPER_THD;
+ }
+
+ if (!poll_weight) {
+ nic_warn(&pdev->dev, "Module Parameter poll_weight can not be 0, resetting
to %d\n",
+ DEFAULT_POLL_WEIGHT);
+ poll_weight = DEFAULT_POLL_WEIGHT;
+ }
+
+ /* check rx_buff value, default rx_buff is 2KB.
+ * Valid rx_buff values are 2KB/4KB/8KB/16KB.
+ */
+ if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB &&
+ rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) {
+ nic_warn(&pdev->dev, "Module Parameter rx_buff value %d is out of range,
must be 2^n. Valid range is 2 - 16, resetting to %dKB",
+ rx_buff, DEFAULT_RX_BUFF_LEN);
+ rx_buff = DEFAULT_RX_BUFF_LEN;
+ }
+
+ return 0;
+}
+
+static void check_lro_module_param(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
+
+ /* Use module parameters first. */
+ if (set_lro_timer != 0 &&
+ set_lro_timer >= HINIC_LRO_RX_TIMER_LOWER &&
+ set_lro_timer <= HINIC_LRO_RX_TIMER_UPPER)
+ lro->timer = set_lro_timer;
+
+ /* Use module parameters first. */
+ if (set_max_wqe_num != 0 &&
+ set_max_wqe_num <= HINIC_LRO_MAX_WQE_NUM_UPPER &&
+ set_max_wqe_num >= HINIC_LRO_MAX_WQE_NUM_LOWER)
+ lro->buffer_size = set_max_wqe_num * nic_dev->rx_buff_len;
+}
+
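+/* Decide the HINIC_SAME_RXTX flag from the host CPU type: set on ARM,
+ * cleared on x86 and unknown CPUs.
+ */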
+static void decide_rss_cfg(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_environment_info *info = &nic_dev->env_info;
+
+ switch (info->cpu) {
+ case HINIC_CPU_ARM_GENERIC:
+ set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+
+ break;
+ case HINIC_CPU_X86_GENERIC:
+ clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+
+ break;
+
+ default:
+ clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+ break;
+ }
+}
+
+static void decide_lro_cfg(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_environment_info *info = &nic_dev->env_info;
+ struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
+
+ if (lro_en_status < HINIC_LRO_STATUS_UNSET) {
+ lro->enable = lro_en_status;
} else {
- down(&nic_dev->mgmt_lock);
+ /* LRO is enabled by default on Huawei OS */
+ switch (info->os) {
+ case HINIC_OS_HUAWEI:
+ lro->enable = 1;
+ break;
+ case HINIC_OS_NON_HUAWEI:
+ lro->enable = 0;
+ break;
+ default:
+ lro->enable = 0;
+ break;
+ }
+ }
+
+ switch (info->board) {
+ case HINIC_BOARD_25GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_25GE;
+ break;
+ case HINIC_BOARD_100GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_100GE;
+ break;
+ case HINIC_BOARD_PG_TP_10GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE;
+ break;
+ case HINIC_BOARD_PG_SM_25GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
+ break;
+ case HINIC_BOARD_PG_100GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE;
+ break;
+ default:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
+ break;
+ }
+
+ /* Default buffer size by CPU type; check_lro_module_param() may
+ * override it with module parameters.
+ */
+ switch (info->cpu) {
+ case HINIC_CPU_ARM_GENERIC:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM *
+ nic_dev->rx_buff_len;
+ break;
+ case HINIC_CPU_X86_GENERIC:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 *
+ nic_dev->rx_buff_len;
+ break;
+ default:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT *
+ nic_dev->rx_buff_len;
+ break;
+ }
- nic_dev->flags &= ~HINIC_LINK_UP;
+ /* lro buffer_size needs adjusting for the board type */
+ switch (info->board) {
+ case HINIC_BOARD_PG_TP_10GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ case HINIC_BOARD_PG_100GE:
+ lro->buffer_size =
+ HINIC_LRO_WQE_NUM_PANGEA_DEFAULT * nic_dev->rx_buff_len;
+ break;
+ default:
+ break;
+ }
- netif_carrier_off(nic_dev->netdev);
- netif_tx_disable(nic_dev->netdev);
+ check_lro_module_param(nic_dev);
- up(&nic_dev->mgmt_lock);
+ nic_info(&nic_dev->pdev->dev,
+ "LRO default configuration: enable %u, timer %u, buffer size %u\n",
+ lro->enable, lro->timer, lro->buffer_size);
+}
- netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
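+/* Boards with fixed coalescing defaults (the PG variants) opt out of
+ * adaptive interrupt moderation; other boards enable it.
+ */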
+static void decide_intr_cfg(struct hinic_nic_dev *nic_dev)
+{
+ struct pci_device_id *id;
+
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ case HINIC_BOARD_PG_100GE:
+ clear_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
+ break;
+ default:
+ set_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
+ break;
}
+}
+
+static void adaptive_configuration_init(struct hinic_nic_dev *nic_dev)
+{
+ struct pci_device_id *id;
+
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ if (id)
+ nic_dev->env_info.board = id->driver_data;
+ else
+ nic_dev->env_info.board = HINIC_BOARD_UNKNOWN;
+
+ nic_dev->env_info.os = HINIC_OS_HUAWEI;
- ret_link_status = buf_out;
- ret_link_status->status = 0;
+ nic_dev->env_info.cpu = HINIC_CPU_ARM_GENERIC;
- *out_size = sizeof(*ret_link_status);
+ nic_info(&nic_dev->pdev->dev,
+ "Board type %u, OS type %u, CPU type %u\n",
+ nic_dev->env_info.board, nic_dev->env_info.os,
+ nic_dev->env_info.cpu);
+
+ decide_lro_cfg(nic_dev);
+ decide_rss_cfg(nic_dev);
+ decide_intr_cfg(nic_dev);
}
-/**
- * nic_dev_init - Initialize the NIC device
- * @pdev: the NIC pci device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int nic_dev_init(struct pci_dev *pdev)
-{
- struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
- struct hinic_dev *nic_dev;
+static int nic_probe(struct hinic_lld_dev *lld_dev, void **uld_dev,
+ char *uld_dev_name)
+{
+ struct pci_dev *pdev = lld_dev->pdev;
+ struct hinic_nic_dev *nic_dev;
struct net_device *netdev;
- struct hinic_hwdev *hwdev;
- int err, num_qps;
+ u16 max_qps;
+ u32 page_num;
+ int err;
- hwdev = hinic_init_hwdev(pdev);
- if (IS_ERR(hwdev)) {
- dev_err(&pdev->dev, "Failed to initialize HW device\n");
- return PTR_ERR(hwdev);
- }
+ /* *uld_dev must never be NULL */
+ *uld_dev = lld_dev;
- num_qps = hinic_hwdev_num_qps(hwdev);
- if (num_qps <= 0) {
- dev_err(&pdev->dev, "Invalid number of QPS\n");
- err = -EINVAL;
- goto err_num_qps;
+ if (!hinic_support_nic(lld_dev->hwdev, NULL)) {
+ nic_info(&pdev->dev, "Hw don't support nic\n");
+ return 0;
}
- netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
+ err = hinic_validate_parameters(lld_dev);
+ if (err)
+ return -EINVAL;
+
+ max_qps = hinic_func_max_nic_qnum(lld_dev->hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
if (!netdev) {
- dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
- err = -ENOMEM;
- goto err_alloc_etherdev;
+ nic_err(&pdev->dev, "Failed to allocate ETH device\n");
+ return -ENOMEM;
}
- netdev->netdev_ops = &hinic_netdev_ops;
- netdev->ethtool_ops = &hinic_ethtool_ops;
- netdev->max_mtu = ETH_MAX_MTU;
- nic_dev = netdev_priv(netdev);
- nic_dev->netdev = netdev;
- nic_dev->hwdev = hwdev;
- nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
- nic_dev->flags = 0;
- nic_dev->txqs = NULL;
- nic_dev->rxqs = NULL;
- nic_dev->tx_weight = tx_weight;
- nic_dev->rx_weight = rx_weight;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev = (struct hinic_nic_dev *)netdev_priv(netdev);
+ nic_dev->hwdev = lld_dev->hwdev;
+ nic_dev->pdev = pdev;
+ nic_dev->poll_weight = (int)poll_weight;
+ nic_dev->msg_enable = DEFAULT_MSG_ENABLE;
+ nic_dev->heart_status = true;
+ nic_dev->in_vm = !hinic_is_in_host();
+ nic_dev->lro_replenish_thld = lro_replenish_thld;
+ nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT);
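+ /* Each page holds RX_BUFF_NUM_PER_PAGE rx buffers; derive the page
+ * allocation order from the configured rx buffer length.
+ */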
+ page_num = (RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len) / PAGE_SIZE;
+ nic_dev->page_order = page_num > 0 ? ilog2(page_num) : 0;
- sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
+ mutex_init(&nic_dev->nic_mutex);
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
+ adaptive_configuration_init(nic_dev);
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
+ nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n");
err = -ENOMEM;
- goto err_vlan_bitmap;
+ goto vlan_bitmap_err;
}
+ nic_dev->netdev = netdev;
+ hinic_assign_netdev_ops(nic_dev);
+ netdev_feature_init(netdev);
+ /* get nic cap from hw */
+ hinic_support_nic(lld_dev->hwdev, &nic_dev->nic_cap);
- nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
- if (!nic_dev->workq) {
- err = -ENOMEM;
- goto err_workq;
+ err = hinic_init_nic_hwdev(nic_dev->hwdev, nic_dev->rx_buff_len);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to init nic hwdev\n");
+ goto init_nic_hwdev_err;
}
- pci_set_drvdata(pdev, netdev);
+ err = hinic_set_super_cqe_state(nic_dev->hwdev, true);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to set super cqe\n");
+ goto set_supper_cqe_err;
+ }
- err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
+ err = hinic_sw_init(nic_dev);
if (err)
- dev_warn(&pdev->dev, "Failed to get mac address\n");
+ goto sw_init_err;
- err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
+ err = hinic_alloc_qps(nic_dev);
if (err) {
- dev_err(&pdev->dev, "Failed to add mac\n");
- goto err_add_mac;
+ nic_err(&pdev->dev, "Failed to alloc qps\n");
+ goto alloc_qps_err;
}
- err = hinic_port_set_mtu(nic_dev, netdev->mtu);
- if (err) {
- dev_err(&pdev->dev, "Failed to set mtu\n");
- goto err_set_mtu;
+ nic_dev->workq = create_singlethread_workqueue(HINIC_NIC_DEV_WQ_NAME);
+ if (!nic_dev->workq) {
+ nic_err(&pdev->dev, "Failed to initialize AEQ workqueue\n");
+ err = -ENOMEM;
+ goto create_workq_err;
}
- rx_mode_work = &nic_dev->rx_mode_work;
- INIT_WORK(&rx_mode_work->work, set_rx_mode);
-
- netdev_features_init(netdev);
+ INIT_LIST_HEAD(&nic_dev->uc_filter_list);
+ INIT_LIST_HEAD(&nic_dev->mc_filter_list);
+ INIT_WORK(&nic_dev->rx_mode_work, hinic_set_rx_mode_work);
- netif_carrier_off(netdev);
-
- hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
- nic_dev, link_status_event_handler);
+ err = hinic_set_default_hw_feature(nic_dev);
+ if (err)
+ goto set_features_err;
- SET_NETDEV_DEV(netdev, &pdev->dev);
+#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
+ hinic_register_notifier(nic_dev);
+#endif
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "Failed to register netdev\n");
- goto err_reg_netdev;
+ nic_err(&pdev->dev, "Failed to register netdev\n");
+ goto netdev_err;
}
+ netif_carrier_off(netdev);
+
+ *uld_dev = nic_dev;
+ nicif_info(nic_dev, probe, netdev, "Register netdev succeeded\n");
+
return 0;
-err_reg_netdev:
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_STATUS);
- cancel_work_sync(&rx_mode_work->work);
+netdev_err:
+#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
+ hinic_unregister_notifier(nic_dev);
+#endif
-err_set_mtu:
-err_add_mac:
- pci_set_drvdata(pdev, NULL);
+set_features_err:
destroy_workqueue(nic_dev->workq);
-err_workq:
-err_vlan_bitmap:
+create_workq_err:
+ hinic_destroy_qps(nic_dev);
+
+alloc_qps_err:
+ hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic_global_func_id(nic_dev->hwdev));
+
+sw_init_err:
+ (void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
+
+set_supper_cqe_err:
+ hinic_free_nic_hwdev(nic_dev->hwdev);
+
+init_nic_hwdev_err:
+ kfree(nic_dev->vlan_bitmap);
+
+vlan_bitmap_err:
free_netdev(netdev);
-err_alloc_etherdev:
-err_num_qps:
- hinic_free_hwdev(hwdev);
return err;
}
-static int hinic_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static void nic_remove(struct hinic_lld_dev *lld_dev, void *adapter)
{
- int err = pci_enable_device(pdev);
+ struct hinic_nic_dev *nic_dev = adapter;
+ struct net_device *netdev;
- if (err) {
- dev_err(&pdev->dev, "Failed to enable PCI device\n");
- return err;
+ if (!nic_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
+ return;
+
+ netdev = nic_dev->netdev;
+
+ unregister_netdev(netdev);
+#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
+ hinic_unregister_notifier(nic_dev);
+
+#endif
+ cancel_work_sync(&nic_dev->rx_mode_work);
+ destroy_workqueue(nic_dev->workq);
+
+ hinic_destroy_qps(nic_dev);
+
+ hinic_clean_mac_list_filter(nic_dev);
+ hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic_global_func_id(nic_dev->hwdev));
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
+
+ (void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
+
+ hinic_free_nic_hwdev(nic_dev->hwdev);
+
+ kfree(nic_dev->vlan_bitmap);
+
+ free_netdev(netdev);
+}
+
+int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int err, err_netdev = 0;
+
+ nicif_info(nic_dev, drv, netdev, "Start to disable RSS\n");
+
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "RSS not enabled, do nothing\n");
+ return 0;
+ }
+
+ if (netif_running(netdev)) {
+ err_netdev = hinic_close(netdev);
+ if (err_netdev) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
}
- err = pci_request_regions(pdev, HINIC_DRV_NAME);
+ /* free rss template */
+ err = hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
if (err) {
- dev_err(&pdev->dev, "Failed to request PCI regions\n");
- goto err_pci_regions;
+ nicif_err(nic_dev, drv, netdev, "Failed to free RSS template\n");
+ } else {
+ nicif_info(nic_dev, drv, netdev, "Success to free RSS template\n");
+ clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
}
- pci_set_master(pdev);
+ if (netif_running(netdev)) {
+ err_netdev = hinic_open(netdev);
+ if (err_netdev)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ }
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_dma_mask;
- }
+ return err ? err : err_netdev;
+}
+
+int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int err, err_netdev = 0;
+
+ nicif_info(nic_dev, drv, netdev, "Start to enable RSS\n");
+
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "RSS already enabled, do nothing\n");
+ return 0;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_warn(&pdev->dev,
- "Couldn't set 64-bit consistent DMA mask\n");
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent DMA mask\n");
- goto err_dma_consistent_mask;
+ if (netif_running(netdev)) {
+ err_netdev = hinic_close(netdev);
+ if (err_netdev) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
}
}
- err = nic_dev_init(pdev);
+ err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
if (err) {
- dev_err(&pdev->dev, "Failed to initialize NIC device\n");
- goto err_nic_dev_init;
+ nicif_err(nic_dev, drv, netdev, "Failed to alloc RSS template\n");
+ } else {
+ set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+ nicif_info(nic_dev, drv, netdev, "Success to alloc RSS template\n");
}
- dev_info(&pdev->dev, "HiNIC driver - probed\n");
- return 0;
+ if (netif_running(netdev)) {
+ err_netdev = hinic_open(netdev);
+ if (err_netdev)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ }
-err_nic_dev_init:
-err_dma_consistent_mask:
-err_dma_mask:
- pci_release_regions(pdev);
+ return err ? err : err_netdev;
+}
-err_pci_regions:
- pci_disable_device(pdev);
- return err;
+static const char *hinic_module_link_err[LINK_ERR_NUM] = {
+ "Unrecognized module",
+};
+
+static void hinic_port_module_event_handler(struct hinic_nic_dev *nic_dev,
+ struct hinic_event_info *event)
+{
+ enum port_module_event_type type = event->module_event.type;
+ enum link_err_type err_type = event->module_event.err_type;
+
+ switch (type) {
+ case HINIC_PORT_MODULE_CABLE_PLUGGED:
+ case HINIC_PORT_MODULE_CABLE_UNPLUGGED:
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Port module event: Cable %s\n",
+ type == HINIC_PORT_MODULE_CABLE_PLUGGED ?
+ "plugged" : "unplugged");
+ break;
+ case HINIC_PORT_MODULE_LINK_ERR:
+ if (err_type >= LINK_ERR_NUM) {
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Link failed, Unknown error type: 0x%x\n",
+ err_type);
+ } else {
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Link failed, error type: 0x%x: %s\n",
+ err_type, hinic_module_link_err[err_type]);
+ }
+ break;
+ default:
+ nicif_err(nic_dev, link, nic_dev->netdev,
+ "Unknown port module type %d\n", type);
+ break;
+ }
}
-static void hinic_remove(struct pci_dev *pdev)
+static void hinic_intr_coalesc_change(struct hinic_nic_dev *nic_dev,
+ struct hinic_event_info *event)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rx_mode_work *rx_mode_work;
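+ /* hw_to_os_speed maps the link-speed level reported in the event
+ * to the corresponding ethtool SPEED_* value.
+ */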
+ u32 hw_to_os_speed[LINK_SPEED_LEVELS] = {SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000};
+ u8 qid, coalesc_timer_cfg, pending_limt;
+ struct pci_device_id *id;
+ u32 speed;
+ int err;
- unregister_netdev(netdev);
+ if (nic_dev->adaptive_rx_coal)
+ return;
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_STATUS);
+ speed = hw_to_os_speed[event->link_info.speed];
+ if (speed == nic_dev->his_link_speed)
+ return;
- rx_mode_work = &nic_dev->rx_mode_work;
- cancel_work_sync(&rx_mode_work->work);
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ return;
+ case HINIC_BOARD_PG_SM_25GE:
+ if (speed == SPEED_10000) {
+ pending_limt =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
+ } else if (speed == SPEED_25000) {
+ pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
+ } else {
+ pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER;
+ }
+ break;
+ case HINIC_BOARD_PG_100GE:
+ return;
+ default:
+ return;
+ }
- pci_set_drvdata(pdev, NULL);
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->intr_coalesce[qid].user_set_intr_coal_flag) {
+ err = set_interrupt_moder(nic_dev, qid,
+ coalesc_timer_cfg,
+ pending_limt);
+ if (!err) {
+ nic_dev->intr_coalesce[qid].pending_limt =
+ pending_limt;
+ nic_dev->intr_coalesce[qid].coalesce_timer_cfg =
+ coalesc_timer_cfg;
+ }
+ }
+ }
- destroy_workqueue(nic_dev->workq);
+ nic_dev->his_link_speed = speed;
+}
- hinic_free_hwdev(nic_dev->hwdev);
+void nic_event(struct hinic_lld_dev *lld_dev, void *adapter,
+ struct hinic_event_info *event)
+{
+ struct hinic_nic_dev *nic_dev = adapter;
+ struct net_device *netdev;
+ enum hinic_event_type type;
- free_netdev(netdev);
+ if (!nic_dev || !event || !hinic_support_nic(lld_dev->hwdev, NULL))
+ return;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+ netdev = nic_dev->netdev;
+ type = event->type;
- dev_info(&pdev->dev, "HiNIC driver - removed\n");
-}
+ switch (type) {
+ case HINIC_EVENT_LINK_DOWN:
+ hinic_link_status_change(nic_dev, false);
+ break;
+ case HINIC_EVENT_LINK_UP:
+ hinic_link_status_change(nic_dev, true);
+ hinic_intr_coalesc_change(nic_dev, event);
+ break;
+ case HINIC_EVENT_HEART_LOST:
+ hinic_heart_lost(nic_dev);
+ break;
+ case HINIC_EVENT_FAULT:
+ break;
+ case HINIC_EVENT_DCB_STATE_CHANGE:
+ if (nic_dev->default_cos_id == event->dcb_state.default_cos)
+ break;
-static const struct pci_device_id hinic_pci_table[] = {
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
- { 0, 0}
-};
-MODULE_DEVICE_TABLE(pci, hinic_pci_table);
+ /* The PF only notifies the VF; the PF itself need not handle this event */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ break;
-static struct pci_driver hinic_driver = {
- .name = HINIC_DRV_NAME,
- .id_table = hinic_pci_table,
- .probe = hinic_probe,
- .remove = hinic_remove,
-};
+ nicif_info(nic_dev, drv, netdev, "Change default cos %d to %d\n",
+ nic_dev->default_cos_id,
+ event->dcb_state.default_cos);
+
+ nic_dev->default_cos_id = event->dcb_state.default_cos;
+ hinic_set_sq_default_cos(netdev, nic_dev->default_cos_id);
+ break;
+ case HINIC_EVENT_PORT_MODULE_EVENT:
+ hinic_port_module_event_handler(nic_dev, event);
+ break;
+ default:
+ break;
+ }
+}
-module_pci_driver(hinic_driver);
+struct hinic_uld_info nic_uld_info = {
+ .probe = nic_probe,
+ .remove = nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .event = nic_event,
+ .ioctl = nic_ioctl,
+}; /*lint -e766*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
new file mode 100644
index 000000000000..c3865d189f1d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_DEV_H
+#define HINIC_NIC_DEV_H
+
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "ossl_knl.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+
+#define HINIC_DRV_NAME "hinic"
+#define HINIC_CHIP_NAME "hinic"
+
+#define HINIC_DRV_VERSION "1.6.2.2"
+
+struct vf_data_storage;
+
+#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF)
+
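+/* Bit positions for the nic_dev->flags word, manipulated atomically
+ * with test_bit()/set_bit()/clear_bit().
+ */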
+enum hinic_flags {
+ HINIC_INTF_UP,
+ HINIC_MAC_FILTER_CHANGED,
+ HINIC_LP_TEST,
+ HINIC_RSS_ENABLE,
+ HINIC_DCB_ENABLE,
+ HINIC_BP_ENABLE,
+ HINIC_SAME_RXTX,
+ HINIC_INTR_ADAPT,
+};
+
+#define RX_BUFF_NUM_PER_PAGE 2
+#define HINIC_MAX_MAC_NUM 3
+#define LP_PKT_CNT 64
+
+struct hinic_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state;
+};
+
+enum hinic_rx_mode_state {
+ HINIC_HW_PROMISC_ON,
+ HINIC_HW_ALLMULTI_ON,
+ HINIC_PROMISC_FORCE_ON,
+ HINIC_ALLMULTI_FORCE_ON,
+};
+
+enum mac_filter_state {
+ HINIC_MAC_WAIT_HW_SYNC,
+ HINIC_MAC_HW_SYNCED,
+ HINIC_MAC_WAIT_HW_UNSYNC,
+ HINIC_MAC_HW_UNSYNCED,
+};
+
+struct hinic_mac_filter {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ unsigned long state;
+};
+
+/* TC bandwidth allocation per direction */
+struct hinic_tc_attr {
+ u8 pg_id; /* Priority Group(PG) ID */
+ u8 bw_pct; /* % of PG's bandwidth */
+ u8 up_map; /* User Priority to Traffic Class mapping */
+ u8 prio_type;
+};
+
+/* User priority configuration */
+struct hinic_tc_cfg {
+ struct hinic_tc_attr path[2]; /* One each for Tx/Rx */
+
+ bool pfc_en;
+};
+
+struct hinic_dcb_config {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+
+ bool pfc_state;
+
+ struct hinic_tc_cfg tc_cfg[HINIC_DCB_TC_MAX];
+ u8 bw_pct[2][HINIC_DCB_PG_MAX]; /* One each for Tx/Rx */
+};
+
+enum hinic_intr_flags {
+ HINIC_INTR_ON,
+};
+
+struct hinic_irq {
+ struct net_device *netdev;
+	/* MSI-X entry index corresponding to this IRQ */
+ u16 msix_entry_idx;
+ u32 irq_id; /* The IRQ number from OS */
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic_txq *txq;
+ struct hinic_rxq *rxq;
+ unsigned long intr_flag;
+};
+
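+/* Per-queue interrupt coalescing state; the *_low/*_high pairs hold the
+ * thresholds used by adaptive moderation, keyed off the packet rate.
+ */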
+struct hinic_intr_coal_info {
+ u8 pending_limt;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+
+ u64 pkt_rate_low;
+ u8 rx_usecs_low;
+ u8 rx_pending_limt_low;
+ u64 pkt_rate_high;
+ u8 rx_usecs_high;
+ u8 rx_pending_limt_high;
+
+ u8 user_set_intr_coal_flag;
+};
+
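+/* Bump one of the u64 counters in struct hinic_nic_stats under the
+ * u64_stats syncp, e.g. HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout).
+ */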
+#define HINIC_NIC_STATS_INC(nic_dev, field) \
+{ \
+ u64_stats_update_begin(&(nic_dev)->stats.syncp); \
+ (nic_dev)->stats.field++; \
+ u64_stats_update_end(&(nic_dev)->stats.syncp); \
+}
+
+struct hinic_nic_stats {
+ u64 netdev_tx_timeout;
+
+	/* Subdivision statistics shown in the private tool */
+ u64 tx_carrier_off_drop;
+ u64 tx_invalid_qid;
+
+#ifdef HAVE_NDO_GET_STATS64
+ struct u64_stats_sync syncp;
+#else
+ struct u64_stats_sync_empty syncp;
+#endif
+};
+
+struct hinic_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ void *hwdev;
+
+ int poll_weight;
+
+ unsigned long *vlan_bitmap;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ u32 msg_enable;
+ unsigned long flags;
+
+ u16 sq_depth;
+ u16 rq_depth;
+
+ /* mapping from priority */
+ u8 sq_cos_mapping[HINIC_DCB_UP_MAX];
+ u8 default_cos_id;
+ struct hinic_txq *txqs;
+ struct hinic_rxq *rxqs;
+
+ struct nic_service_cap nic_cap;
+
+ struct irq_info *qps_irq_info;
+ struct hinic_irq *irq_cfg;
+ struct work_struct rx_mode_work;
+ struct delayed_work moderation_task;
+ struct workqueue_struct *workq;
+
+ struct list_head uc_filter_list;
+ struct list_head mc_filter_list;
+ unsigned long rx_mod_state;
+ int netdev_uc_cnt;
+ int netdev_mc_cnt;
+ int lb_test_rx_idx;
+ int lb_pkt_len;
+ u8 *lb_test_rx_buf;
+
+ u8 rss_tmpl_idx;
+ u16 num_rss;
+ u16 rss_limit;
+ u8 rss_hash_engine;
+ struct nic_rss_type rss_type;
+ u8 *rss_hkey_user;
+ /* hkey in big endian */
+ u32 *rss_hkey_user_be;
+ u32 *rss_indir_user;
+
+ u8 dcbx_cap;
+ u32 dcb_changes;
+ u8 max_cos;
+ u8 up_valid_bitmap;
+ u8 up_cos[HINIC_DCB_UP_MAX];
+ struct hinic_dcb_config dcb_cfg;
+ struct hinic_dcb_config tmp_dcb_cfg;
+ struct hinic_dcb_config save_dcb_cfg;
+ unsigned long dcb_flags;
+ int disable_port_cnt;
+ /* lock for disable or enable traffic flow */
+ struct semaphore dcb_sem;
+
+ u16 bp_lower_thd;
+ u16 bp_upper_thd;
+ bool heart_status;
+
+ struct hinic_intr_coal_info *intr_coalesce;
+ unsigned long last_moder_jiffies;
+ u32 adaptive_rx_coal;
+ u8 intr_coal_set_flag;
+ u32 his_link_speed;
+	/* interrupt coalescing must differ when running in a virtual machine */
+ bool in_vm;
+
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats net_stats;
+#endif
+ struct hinic_nic_stats stats;
+ /* lock for nic resource */
+ struct mutex nic_mutex;
+ bool force_port_disable;
+ struct semaphore port_state_sem;
+ u8 link_status;
+
+ struct hinic_environment_info env_info;
+ struct hinic_adaptive_cfg adaptive_cfg;
+
+ /* pangea cpu affinity setting */
+ bool force_affinity;
+ cpumask_t affinity_mask;
+
+ u32 lro_replenish_thld;
+ u16 rx_buff_len;
+ u32 page_order;
+};
+
+extern struct hinic_uld_info nic_uld_info;
+
+int hinic_open(struct net_device *netdev);
+int hinic_close(struct net_device *netdev);
+int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+
+int hinic_force_port_disable(struct hinic_nic_dev *nic_dev);
+int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
+int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
+void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status);
+
+int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev);
+int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev);
+
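+/* Route messages to the netif-aware nicif_* loggers once the netdev is
+ * registered; fall back to the PCI device's nic_* loggers before that.
+ */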
+#define hinic_msg(level, nic_dev, msglvl, format, arg...) \
+do { \
+ if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \
+ == NETREG_REGISTERED) \
+ nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \
+ format, ## arg); \
+ else \
+ nic_##level(&(nic_dev)->pdev->dev, \
+ format, ## arg); \
+} while (0)
+
+#define hinic_info(nic_dev, msglvl, format, arg...) \
+ hinic_msg(info, nic_dev, msglvl, format, ## arg)
+
+#define hinic_warn(nic_dev, msglvl, format, arg...) \
+ hinic_msg(warn, nic_dev, msglvl, format, ## arg)
+
+#define hinic_err(nic_dev, msglvl, format, arg...) \
+ hinic_msg(err, nic_dev, msglvl, format, ## arg)
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
new file mode 100644
index 000000000000..0ecb2e2b2d31
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -0,0 +1,2084 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/sock.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_lld.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dbg.h"
+#include "hinic_nictool.h"
+#include "hinic_qp.h"
+#include "hinic_dcb.h"
+#include "hinic_dbgtool_knl.h"
+
+#define HIADM_DEV_PATH "/dev/nictool_dev"
+#define HIADM_DEV_CLASS "nictool_class"
+#define HIADM_DEV_NAME "nictool_dev"
+
+#define MAJOR_DEV_NUM 921
+
+static dev_t g_dev_id = {0};
+/*lint -save -e104 -e808*/
+static struct class *g_nictool_class;
+/*lint -restore*/
+static struct cdev g_nictool_cdev;
+
+static int g_nictool_init_flag;
+static int g_nictool_ref_cnt;
+
+typedef int (*nic_driv_module)(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+struct nic_drv_module_handle {
+ enum driver_cmd_type driv_cmd_name;
+ nic_driv_module driv_func;
+};
+
+typedef int (*hw_driv_module)(void *hwdev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+struct hw_drv_module_handle {
+ enum driver_cmd_type driv_cmd_name;
+ hw_driv_module driv_func;
+};
+
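+/* Buffers exchanged with the microcode (SEND_TO_UCODE) travel in cmdq
+ * command buffers; every other module uses plain kzalloc()'d memory.
+ */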
+static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in)
+{
+ if (!buf_in)
+ return;
+
+ if (nt_msg->module == SEND_TO_UCODE)
+ hinic_free_cmd_buf(hwdev, buf_in);
+ else
+ kfree(buf_in);
+}
+
+static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
+ u32 in_size, void **buf_in)
+{
+ void *msg_buf;
+
+ if (!in_size)
+ return 0;
+
+ if (nt_msg->module == SEND_TO_UCODE) {
+ struct hinic_cmd_buf *cmd_buf;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ pr_err("Alloc cmdq cmd buffer failed in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+ msg_buf = cmd_buf->buf;
+ *buf_in = (void *)cmd_buf;
+ cmd_buf->size = (u16)in_size;
+ } else {
+ msg_buf = kzalloc(in_size, GFP_KERNEL);
+ *buf_in = msg_buf;
+ }
+ if (!(*buf_in)) {
+ pr_err("Alloc buffer in failed\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) {
+ pr_err("%s:%d: Copy from user failed\n",
+ __func__, __LINE__);
+ free_buff_in(hwdev, nt_msg, *buf_in);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void free_buff_out(void *hwdev, struct msg_module *nt_msg,
+ void *buf_out)
+{
+ if (!buf_out)
+ return;
+
+ if (nt_msg->module == SEND_TO_UCODE &&
+ !nt_msg->ucode_cmd.ucode_db.ucode_imm)
+ hinic_free_cmd_buf(hwdev, buf_out);
+ else
+ kfree(buf_out);
+}
+
+static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
+ u32 out_size, void **buf_out)
+{
+ if (!out_size)
+ return 0;
+
+ if (nt_msg->module == SEND_TO_UCODE &&
+ !nt_msg->ucode_cmd.ucode_db.ucode_imm) {
+ struct hinic_cmd_buf *cmd_buf;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ *buf_out = (void *)cmd_buf;
+ } else {
+ *buf_out = kzalloc(out_size, GFP_KERNEL);
+ }
+ if (!(*buf_out)) {
+ pr_err("Alloc buffer out failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int copy_buf_out_to_user(struct msg_module *nt_msg,
+ u32 out_size, void *buf_out)
+{
+ int ret = 0;
+ void *msg_out;
+
+ if (nt_msg->module == SEND_TO_UCODE &&
+ !nt_msg->ucode_cmd.ucode_db.ucode_imm)
+ msg_out = ((struct hinic_cmd_buf *)buf_out)->buf;
+ else
+ msg_out = buf_out;
+
+ if (copy_to_user(nt_msg->out_buf, msg_out, out_size))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_sq_info *sq_info,
+ u32 *msg_size);
+static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_rq_info *rq_info,
+ u32 *msg_size);
+
+static int get_tx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 q_id;
+ int err;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get tx info\n");
+ return -EFAULT;
+ }
+
+ if (!buf_in || !buf_out)
+ return -EINVAL;
+
+ q_id = *((u16 *)buf_in);
+
+ err = hinic_dbg_get_sq_info(nic_dev, q_id, buf_out, out_size);
+
+ return err;
+}
+
+static int get_q_num(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 num_qp;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get queue number\n");
+ return -EFAULT;
+ }
+
+ if (!buf_out)
+ return -EFAULT;
+
+ num_qp = hinic_dbg_get_qp_num(nic_dev->hwdev);
+ if (!num_qp)
+ return -EFAULT;
+
+ if (*out_size != sizeof(u16)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(u16));
+ return -EFAULT;
+ }
+ *((u16 *)buf_out) = num_qp;
+
+ return 0;
+}
+
+static int get_tx_wqe_info(struct hinic_nic_dev *nic_dev,
+ void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ struct hinic_wqe_info *info = buf_in;
+ u16 q_id = 0;
+ u16 idx = 0, wqebb_cnt = 1;
+ int err;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get tx wqe info\n");
+ return -EFAULT;
+ }
+
+ if (!info || !buf_out)
+ return -EFAULT;
+
+ /* TODO: change the type of info->q_id */
+ q_id = (u16)info->q_id;
+ idx = (u16)info->wqe_id;
+
+ err = hinic_dbg_get_sq_wqe_info(nic_dev->hwdev, q_id,
+ idx, wqebb_cnt,
+ buf_out, (u16 *)out_size);
+
+ return err;
+}
+
+static int get_rx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 q_id;
+ int err;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get rx info\n");
+ return -EFAULT;
+ }
+
+ if (!buf_in || !buf_out)
+ return -EINVAL;
+
+ q_id = *((u16 *)buf_in);
+
+ err = hinic_dbg_get_rq_info(nic_dev, q_id, buf_out, out_size);
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "qid: %u, coalesc_timer:0x%x, pending_limit: 0x%x\n",
+ q_id, nic_dev->rxqs[q_id].last_coalesc_timer_cfg,
+ nic_dev->rxqs[q_id].last_pending_limt);
+ }
+
+ return err;
+}
+
+static int get_rx_wqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_wqe_info *info = buf_in;
+ u16 q_id = 0;
+ u16 idx = 0, wqebb_cnt = 1;
+ int err;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get rx wqe info\n");
+ return -EFAULT;
+ }
+
+ if (!info || !buf_out)
+ return -EFAULT;
+
+ q_id = (u16)info->q_id;
+ idx = (u16)info->wqe_id;
+
+ err = hinic_dbg_get_rq_wqe_info(nic_dev->hwdev, q_id,
+ idx, wqebb_cnt,
+ buf_out, (u16 *)out_size);
+
+ return err;
+}
+
+static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ u16 intr_num;
+
+ intr_num = hinic_intr_num(nic_dev->hwdev);
+
+ if (*out_size != sizeof(u16)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(u16));
+ return -EFAULT;
+ }
+ *(u16 *)buf_out = intr_num;
+
+ *out_size = sizeof(u16);
+
+ return 0;
+}
+
+static void clean_nicdev_stats(struct hinic_nic_dev *nic_dev)
+{
+ u64_stats_update_begin(&nic_dev->stats.syncp);
+ nic_dev->stats.netdev_tx_timeout = 0;
+ nic_dev->stats.tx_carrier_off_drop = 0;
+ nic_dev->stats.tx_invalid_qid = 0;
+ u64_stats_update_end(&nic_dev->stats.syncp);
+}
+
+static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ int i;
+
+ *out_size = 0;
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats));
+#endif
+ clean_nicdev_stats(nic_dev);
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats);
+ hinic_txq_clean_stats(&nic_dev->txqs[i].txq_stats);
+ }
+
+ return 0;
+}
+
+static int get_num_cos(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ u8 *num_cos = buf_out;
+
+ if (!buf_out || !out_size)
+ return -EINVAL;
+
+ if (*out_size != sizeof(*num_cos)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*num_cos));
+ return -EFAULT;
+ }
+
+ return hinic_get_num_cos(nic_dev, num_cos);
+}
+
+static int get_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_cos_up_map *map = buf_out;
+
+ if (!buf_out || !out_size)
+ return -EINVAL;
+
+ if (*out_size != sizeof(*map)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*map));
+ return -EFAULT;
+ }
+
+ return hinic_get_cos_up_map(nic_dev, &map->num_cos, map->cos_up);
+}
+
+static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_cos_up_map *map = buf_in;
+
+ if (!buf_in || !out_size)
+ return -EINVAL;
+
+ *out_size = sizeof(*map);
+
+ return hinic_set_cos_up_map(nic_dev, map->cos_up);
+}
+
+static int get_rx_cqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_wqe_info *info = buf_in;
+ u16 q_id = 0;
+ u16 idx = 0;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get rx cqe info\n");
+ return -EFAULT;
+ }
+
+ if (!info || !buf_out)
+ return -EFAULT;
+
+ if (*out_size != sizeof(struct hinic_rq_cqe)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(struct hinic_rq_cqe));
+ return -EFAULT;
+ }
+ q_id = (u16)info->q_id;
+ idx = (u16)info->wqe_id;
+
+ if (q_id >= nic_dev->num_qps || idx >= nic_dev->rxqs[q_id].q_depth)
+ return -EFAULT;
+
+ memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe,
+ sizeof(struct hinic_rq_cqe));
+
+ return 0;
+}
+
+static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_sq_info *sq_info,
+ u32 *msg_size)
+{
+ int err;
+
+ if (!nic_dev)
+ return -EINVAL;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get sq info\n");
+ return -EFAULT;
+ }
+
+ if (q_id >= nic_dev->num_qps) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Input queue id is larger than the actual queue number\n");
+ return -EINVAL;
+ }
+
+ if (*msg_size != sizeof(*sq_info)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *msg_size, sizeof(*sq_info));
+ return -EFAULT;
+ }
+ sq_info->q_id = q_id;
+ sq_info->pi = hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id);
+ sq_info->ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
+ sq_info->fi = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
+
+ sq_info->q_depth = nic_dev->txqs[q_id].q_depth;
+ /* pi_reverse */
+
+ sq_info->weqbb_size = HINIC_SQ_WQEBB_SIZE;
+ /* priority */
+
+ sq_info->ci_addr = hinic_dbg_get_sq_ci_addr(nic_dev->hwdev, q_id);
+
+ sq_info->cla_addr = hinic_dbg_get_sq_cla_addr(nic_dev->hwdev, q_id);
+ sq_info->slq_handle = hinic_dbg_get_sq_wq_handle(nic_dev->hwdev, q_id);
+
+ /* direct wqe */
+
+ err = hinic_dbg_get_sq_db_addr(nic_dev->hwdev,
+ q_id, &sq_info->db_addr.map_addr,
+ &sq_info->db_addr.phy_addr,
+ &sq_info->pg_idx);
+
+ sq_info->glb_sq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
+
+ return err;
+}
+
+static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_rq_info *rq_info,
+ u32 *msg_size)
+{
+ if (!nic_dev)
+ return -EINVAL;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't get rq info\n");
+ return -EFAULT;
+ }
+
+ if (q_id >= nic_dev->num_qps) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Input queue id is larger than the actual queue number\n");
+ return -EINVAL;
+ }
+ if (*msg_size != sizeof(*rq_info)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *msg_size, sizeof(*rq_info));
+ return -EFAULT;
+ }
+
+ rq_info->q_id = q_id;
+ rq_info->glb_rq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
+
+ rq_info->hw_pi = hinic_dbg_get_rq_hw_pi(nic_dev->hwdev, q_id);
+ rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx &
+ nic_dev->rxqs[q_id].q_mask;
+
+ rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update;
+
+ rq_info->wqebb_size = HINIC_RQ_WQE_SIZE;
+ rq_info->q_depth = nic_dev->rxqs[q_id].q_depth;
+
+ rq_info->buf_len = nic_dev->rxqs[q_id].buf_len;
+
+ rq_info->slq_handle = hinic_dbg_get_rq_wq_handle(nic_dev->hwdev, q_id);
+ if (!rq_info->slq_handle) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Get rq slq handle null\n");
+ return -EFAULT;
+ }
+ rq_info->ci_wqe_page_addr =
+ hinic_slq_get_first_pageaddr(rq_info->slq_handle);
+ rq_info->ci_cla_tbl_addr =
+ hinic_dbg_get_rq_cla_addr(nic_dev->hwdev, q_id);
+
+ rq_info->msix_idx = nic_dev->rxqs[q_id].msix_entry_idx;
+ rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id;
+
+ return 0;
+}
+
+static int get_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_nic_loop_mode *mode = buf_out;
+ int err;
+
+ if (!out_size || !mode)
+ return -EFAULT;
+
+ if (*out_size != sizeof(*mode)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*mode));
+ return -EFAULT;
+ }
+ err = hinic_get_loopback_mode_ex(nic_dev->hwdev, &mode->loop_mode,
+ &mode->loop_ctrl);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_nic_loop_mode *mode = buf_in;
+ int err;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't set loopback mode\n");
+ return -EFAULT;
+ }
+
+ if (!mode || !out_size)
+ return -EFAULT;
+
+ err = hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode,
+ mode->loop_ctrl);
+ if (err)
+ return err;
+
+ *out_size = sizeof(*mode);
+ return 0;
+}
+
+static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ enum hinic_nic_link_mode *link = buf_in;
+ u8 link_status;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Netdev is down, can't set link mode\n");
+ return -EFAULT;
+ }
+
+ if (!link || !out_size)
+ return -EFAULT;
+
+ switch (*link) {
+ case HINIC_LINK_MODE_AUTO:
+ if (hinic_get_link_state(nic_dev->hwdev, &link_status))
+ link_status = false;
+ hinic_link_status_change(nic_dev, (bool)link_status);
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "Set link mode: auto succeed, now is link %s\n",
+ (link_status ? "up" : "down"));
+ break;
+ case HINIC_LINK_MODE_UP:
+ hinic_link_status_change(nic_dev, true);
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "Set link mode: up succeed\n");
+ break;
+ case HINIC_LINK_MODE_DOWN:
+ hinic_link_status_change(nic_dev, false);
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "Set link mode: down succeed\n");
+ break;
+ default:
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Invalid link mode %d to set\n", *link);
+ return -EINVAL;
+ }
+
+ *out_size = sizeof(*link);
+ return 0;
+}
+
+static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ u32 pf_bw_limit = 0;
+ int err;
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "To set VF bandwidth rate, please use ip link cmd\n");
+ return -EINVAL;
+ }
+
+ if (!buf_in || !buf_out || !out_size)
+ return -EINVAL;
+
+ pf_bw_limit = *((u32 *)buf_in);
+
+ err = hinic_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to set pf bandwidth limit to %d%%\n",
+ pf_bw_limit);
+ if (err < 0)
+ return err;
+ }
+
+ *((u8 *)buf_out) = (u8)err;
+ *out_size = sizeof(u8);
+
+ return 0;
+}
+
+static int get_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ u32 pf_bw_limit = 0;
+ int err;
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "To get VF bandwidth rate, please use ip link cmd\n");
+ return -EINVAL;
+ }
+
+ if (*out_size != sizeof(u32)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(u32));
+ return -EFAULT;
+ }
+ err = hinic_dbg_get_pf_bw_limit(nic_dev->hwdev, &pf_bw_limit);
+ if (err)
+ return err;
+
+ *((u32 *)buf_out) = pf_bw_limit;
+
+ return 0;
+}
+
+static int get_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_nic_poll_weight *weight_info = buf_out;
+
+ if (*out_size != sizeof(*weight_info)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*weight_info));
+ return -EFAULT;
+ }
+ weight_info->poll_weight = nic_dev->poll_weight;
+ return 0;
+}
+
+static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_nic_poll_weight *weight_info = buf_in;
+
+ nic_dev->poll_weight = weight_info->poll_weight;
+ *out_size = sizeof(u32);
+ return 0;
+}
+
+static int get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_homologues *homo = buf_out;
+
+ if (*out_size != sizeof(*homo)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*homo));
+ return -EFAULT;
+ }
+
+ if (test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
+ homo->homo_state = HINIC_HOMOLOGUES_ON;
+ else
+ homo->homo_state = HINIC_HOMOLOGUES_OFF;
+
+ *out_size = sizeof(*homo);
+
+ return 0;
+}
+
+static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_homologues *homo = buf_in;
+
+ if (homo->homo_state == HINIC_HOMOLOGUES_ON) {
+ set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+ } else if (homo->homo_state == HINIC_HOMOLOGUES_OFF) {
+ clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+ } else {
+ pr_err("Invalid parameters.\n");
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*homo);
+
+ return 0;
+}
+
+static int get_sset_count(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ u32 count;
+
+ if (!buf_in || in_size != sizeof(u32) || !out_size ||
+ *out_size != sizeof(u32) || !buf_out) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid parameters, in_size: %d, out_size: %d\n",
+			  in_size, *out_size);
+ return -EINVAL;
+ }
+
+ switch (*((u32 *)buf_in)) {
+ case HINIC_SHOW_SSET_IO_STATS:
+ count = 0;
+ break;
+
+ default:
+ count = 0;
+ break;
+ }
+
+ *((u32 *)buf_out) = count;
+
+ return 0;
+}
+
+static int get_sset_stats(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct hinic_show_item *items = buf_out;
+ u32 sset, count, size;
+ int err;
+
+ if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out)
+ return -EINVAL;
+
+ size = sizeof(u32);
+ err = get_sset_count(nic_dev, buf_in, in_size, &count, &size);
+ if (err)
+ return -EINVAL;
+
+ if (count * sizeof(*items) != *out_size) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, count * sizeof(*items));
+ return -EINVAL;
+ }
+
+ sset = *((u32 *)buf_in);
+
+ switch (sset) {
+ case HINIC_SHOW_SSET_IO_STATS:
+ break;
+
+ default:
+ nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %d to get stats\n",
+ sset);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int get_func_type(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 func_typ;
+
+ func_typ = hinic_func_type(hwdev);
+ if (*out_size != sizeof(u16)) {
+		pr_err("Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(u16));
+ return -EFAULT;
+ }
+ *(u16 *)buf_out = func_typ;
+ return 0;
+}
+
+static int get_func_id(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 func_id;
+
+ if (*out_size != sizeof(u16)) {
+		pr_err("Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(u16));
+ return -EFAULT;
+ }
+
+ func_id = hinic_global_func_id(hwdev);
+ *(u16 *)buf_out = func_id;
+ *out_size = sizeof(u16);
+ return 0;
+}
+
+static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ int offset = 0;
+ struct chip_fault_stats *fault_info;
+
+ if (*out_size != sizeof(*fault_info)) {
+		pr_err("Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*fault_info));
+ return -EFAULT;
+ }
+ fault_info = (struct chip_fault_stats *)buf_in;
+ offset = fault_info->offset;
+
+ fault_info = (struct chip_fault_stats *)buf_out;
+ hinic_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset);
+
+ return 0;
+}
+
+static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ return hinic_dbg_get_hw_stats(hwdev, buf_out, (u16 *)out_size);
+}
+
+static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ *out_size = hinic_dbg_clear_hw_stats(hwdev);
+ return 0;
+}
+
+static int get_drv_version(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ struct drv_version_info *ver_info;
+ char ver_str[MAX_VER_INFO_LEN] = {0};
+
+ if (*out_size != sizeof(*ver_info)) {
+		pr_err("Unexpected out buf size from user: %d, expect: %lu\n",
+ *out_size, sizeof(*ver_info));
+ return -EFAULT;
+ }
+ snprintf(ver_str, sizeof(ver_str), "%s [compiled with the kernel]",
+ HINIC_DRV_VERSION);
+ ver_info = (struct drv_version_info *)buf_out;
+ memcpy(ver_info->ver, ver_str, sizeof(ver_str));
+
+ return 0;
+}
+
+static int get_self_test(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ return 0;
+}
+
+static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ return 0;
+}
+
+static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ hinic_get_card_info(hwdev, buf_out);
+ *out_size = in_size;
+
+ return 0;
+}
+
+static int get_device_id(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u16 dev_id;
+ int err;
+
+ if (*out_size != sizeof(u16)) {
+ pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
+ *out_size, sizeof(u16));
+ return -EFAULT;
+ }
+
+ err = hinic_get_device_id(hwdev, &dev_id);
+ if (err)
+ return err;
+
+	*((u16 *)buf_out) = dev_id;
+	*out_size = sizeof(u16);
+
+ return 0;
+}
+
+static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ bool in_host;
+
+ if (!buf_out || (*out_size != sizeof(u8)))
+ return -EINVAL;
+
+ in_host = hinic_is_in_host();
+ if (in_host)
+ *((u8 *)buf_out) = 0;
+ else
+ *((u8 *)buf_out) = 1;
+
+ return 0;
+}
+
+static int get_pf_id(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ struct hinic_pf_info *pf_info;
+ u32 port_id = 0;
+ int err;
+
+ if (!buf_out || (*out_size != sizeof(*pf_info)))
+ return -EINVAL;
+
+ port_id = *((u32 *)buf_in);
+ pf_info = (struct hinic_pf_info *)buf_out;
+ err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id);
+ if (err)
+ return err;
+
+ pf_info->isvalid = 1;
+ *out_size = sizeof(*pf_info);
+
+ return 0;
+}
+
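+/* Lazily allocate one reserved, physically contiguous buffer per card;
+ * userspace mmap()s it later (see the .mmap hook) to reach the API chain.
+ */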
+static int __get_card_usr_api_chain_mem(int card_idx)
+{
+ unsigned char *tmp;
+ int i;
+
+ card_id = card_idx;
+ if (!g_card_vir_addr[card_idx]) {
+ g_card_vir_addr[card_idx] =
+ (void *)__get_free_pages(GFP_KERNEL,
+ DBGTOOL_PAGE_ORDER);
+ if (!g_card_vir_addr[card_idx]) {
+			pr_err("Alloc api chain memory failed for card %d, virt_addr: %p!\n",
+ card_idx, g_card_vir_addr[card_idx]);
+ return -EFAULT;
+ }
+
+ memset(g_card_vir_addr[card_idx], 0,
+ PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
+
+ g_card_phy_addr[card_idx] =
+ virt_to_phys(g_card_vir_addr[card_idx]);
+ if (!g_card_phy_addr[card_idx]) {
+ pr_err("phy addr for card %d is 0, vir_addr: 0x%p\n",
+ card_idx, g_card_vir_addr[card_idx]);
+ free_pages((unsigned long)g_card_vir_addr[card_idx],
+ DBGTOOL_PAGE_ORDER);
+ g_card_vir_addr[card_idx] = NULL;
+ return -EFAULT;
+ }
+
+ tmp = g_card_vir_addr[card_idx];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ SetPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+static int get_pf_dev_info(char *dev_name, struct msg_module *nt_msg)
+{
+ struct pf_dev_info dev_info[16] = { {0} };
+ struct card_node *card_info = NULL;
+ int i;
+ int err;
+
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ err = __get_card_usr_api_chain_mem(i);
+ if (err) {
+		pr_err("Failed to get api chain memory for userspace %s\n",
+ dev_name);
+ return -EFAULT;
+ }
+
+ chipif_get_all_pf_dev_info(dev_info, i,
+ card_info->func_handle_array);
+
+	/* Copy the dev_info to user mode */
+ if (copy_to_user(nt_msg->out_buf, dev_info,
+ nt_msg->lenInfo.inBuffLen)) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int knl_free_mem(char *dev_name, struct msg_module *nt_msg)
+{
+ struct card_node *card_info = NULL;
+ int i;
+
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ dbgtool_knl_free_mem(i);
+
+ return 0;
+}
+
+extern int hinic_get_card_func_info_by_card_name(char *chip_name,
+ struct hinic_card_func_info
+ *card_func);
+
+static int get_card_func_info(char *dev_name, struct msg_module *nt_msg)
+{
+ struct hinic_card_func_info card_func_info = {0};
+ int id, err;
+
+ if (nt_msg->lenInfo.outBuffLen != sizeof(card_func_info)) {
+ pr_err("Invalid out_buf_size %d, expect %lu\n",
+ nt_msg->lenInfo.outBuffLen, sizeof(card_func_info));
+ return -EINVAL;
+ }
+
+ err = memcmp(dev_name, HINIC_CHIP_NAME, strlen(HINIC_CHIP_NAME));
+ if (err) {
+ pr_err("Invalid chip name %s\n", dev_name);
+ return err;
+ }
+
+ err = sscanf(dev_name, HINIC_CHIP_NAME "%d", &id);
+	if (err != 1) {
+		pr_err("Failed to get hinic id\n");
+		return -EINVAL;
+	}
+
+ if (id >= MAX_CARD_NUM) {
+		pr_err("chip id %d exceeds limit [0-%d]\n", id, MAX_CARD_NUM - 1);
+ return -EINVAL;
+ }
+
+ err = hinic_get_card_func_info_by_card_name(dev_name, &card_func_info);
+ if (err)
+ return err;
+
+ if (!card_func_info.num_pf) {
+		pr_err("No function found for %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ err = __get_card_usr_api_chain_mem(id);
+ if (err) {
+		pr_err("Failed to get api chain memory for userspace %s\n",
+ dev_name);
+ return -EFAULT;
+ }
+
+ card_func_info.usr_api_phy_addr = g_card_phy_addr[id];
+
+	/* Copy the card_func_info to user mode */
+ if (copy_to_user(nt_msg->out_buf, &card_func_info,
+ sizeof(card_func_info))) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30
+static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u32 loop_cnt = 0;
+
+ while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) {
+ if (!hinic_get_mgmt_channel_status(hwdev))
+ return 0;
+
+ msleep(1000);
+ loop_cnt++;
+ }
+ if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
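+/* Dispatch tables mapping each nictool sub-command to its handler */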
+struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
+ {TX_INFO, get_tx_info},
+ {Q_NUM, get_q_num},
+ {TX_WQE_INFO, get_tx_wqe_info},
+ {RX_INFO, get_rx_info},
+ {RX_WQE_INFO, get_rx_wqe_info},
+ {RX_CQE_INFO, get_rx_cqe_info},
+ {GET_INTER_NUM, get_inter_num},
+ {CLEAR_FUNC_STASTIC, clear_func_static},
+ {GET_NUM_COS, get_num_cos},
+ {GET_COS_UP_MAP, get_dcb_cos_up_map},
+ {SET_COS_UP_MAP, set_dcb_cos_up_map},
+ {GET_LOOPBACK_MODE, get_loopback_mode},
+ {SET_LOOPBACK_MODE, set_loopback_mode},
+ {SET_LINK_MODE, set_link_mode},
+ {SET_PF_BW_LIMIT, set_pf_bw_limit},
+ {GET_PF_BW_LIMIT, get_pf_bw_limit},
+ {GET_POLL_WEIGHT, get_poll_weight},
+ {SET_POLL_WEIGHT, set_poll_weight},
+ {GET_HOMOLOGUE, get_homologue},
+ {SET_HOMOLOGUE, set_homologue},
+ {GET_SSET_COUNT, get_sset_count},
+ {GET_SSET_ITEMS, get_sset_stats},
+};
+
+struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
+ {FUNC_TYPE, get_func_type},
+ {GET_FUNC_IDX, get_func_id},
+ {GET_DRV_VERSION, get_drv_version},
+ {GET_HW_STATS, get_hw_stats},
+ {CLEAR_HW_STATS, clear_hw_stats},
+ {GET_SELF_TEST_RES, get_self_test},
+ {GET_CHIP_FAULT_STATS, get_chip_faults_stats},
+ {GET_CHIP_ID, get_chip_id_test},
+ {GET_SINGLE_CARD_INFO, get_single_card_info},
+ {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status},
+ {GET_DEVICE_ID, get_device_id},
+ {IS_DRV_IN_VM, is_driver_in_vm},
+ {GET_PF_ID, get_pf_id},
+};
+
+static int send_to_nic_driver(struct hinic_nic_dev *nic_dev,
+ u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ int index, num_cmds = sizeof(nic_driv_module_cmd_handle) /
+ sizeof(nic_driv_module_cmd_handle[0]);
+ enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd;
+ int err = 0;
+
+ mutex_lock(&nic_dev->nic_mutex);
+ for (index = 0; index < num_cmds; index++) {
+ if (cmd_type ==
+ nic_driv_module_cmd_handle[index].driv_cmd_name) {
+ err = nic_driv_module_cmd_handle[index].driv_func
+ (nic_dev, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ }
+ }
+ mutex_unlock(&nic_dev->nic_mutex);
+
+ if (index == num_cmds)
+ pr_err("Can't find callback for %d\n", cmd_type);
+
+ return err;
+}
+
+static int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int index, num_cmds = sizeof(hw_driv_module_cmd_handle) /
+ sizeof(hw_driv_module_cmd_handle[0]);
+ enum driver_cmd_type cmd_type =
+ (enum driver_cmd_type)(nt_msg->msg_formate);
+ int err = 0;
+
+ for (index = 0; index < num_cmds; index++) {
+ if (cmd_type ==
+ hw_driv_module_cmd_handle[index].driv_cmd_name) {
+ err = hw_driv_module_cmd_handle[index].driv_func
+ (hwdev, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int send_to_ucode(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int ret = 0;
+
+ if (nt_msg->ucode_cmd.ucode_db.ucode_imm) {
+ ret = hinic_cmdq_direct_resp
+ (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
+ nt_msg->ucode_cmd.ucode_db.comm_mod_type,
+ nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
+ buf_in, buf_out, 0);
+ if (ret)
+ pr_err("Send direct cmdq err: %d!\n", ret);
+ } else {
+ ret = hinic_cmdq_detail_resp
+ (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
+ nt_msg->ucode_cmd.ucode_db.comm_mod_type,
+ nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
+ buf_in, buf_out, 0);
+ if (ret)
+ pr_err("Send detail cmdq err: %d!\n", ret);
+ }
+
+ return ret;
+}
+
+static int api_csr_read(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in;
+ int ret = 0;
+ u32 rd_len;
+ u32 rd_addr;
+ u32 rd_cnt = 0;
+ u32 offset = 0;
+ u8 node_id;
+ u32 i;
+
+ rd_len = up_log_msg->rd_len;
+ rd_addr = up_log_msg->addr;
+ node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
+
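+	/* CSR space is accessed 32 bits at a time, so round the requested
+	 * byte count up to whole dwords.
+	 */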
+ rd_cnt = rd_len / 4;
+
+ if (rd_len % 4)
+ rd_cnt++;
+
+ for (i = 0; i < rd_cnt; i++) {
+ ret = hinic_api_csr_rd32(hwdev, node_id,
+ rd_addr + offset,
+ (u32 *)(((u8 *)buf_out) + offset));
+ if (ret) {
+ pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n",
+			       ret, node_id, rd_addr + offset);
+ return ret;
+ }
+ offset += 4;
+ }
+ *out_size = rd_len;
+
+ return ret;
+}
+
+static int api_csr_write(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ int ret = 0;
+ u32 rd_len;
+ u32 rd_addr;
+ u32 rd_cnt = 0;
+ u32 offset = 0;
+ u8 node_id;
+ u32 i;
+ u8 *data;
+
+ rd_len = csr_write_msg->rd_len;
+ rd_addr = csr_write_msg->addr;
+ node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
+
+ rd_cnt = rd_len / 4;
+ if (rd_len % 4)
+ rd_cnt++;
+
+ data = kzalloc(rd_len, GFP_KERNEL);
+ if (!data) {
+ pr_err("No more memory\n");
+		return -ENOMEM;
+ }
+ if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) {
+ pr_err("Copy information from user failed\n");
+ kfree(data);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < rd_cnt; i++) {
+ ret = hinic_api_csr_wr32(hwdev, node_id,
+ rd_addr + offset,
+					 *((u32 *)(data + offset)));
+ if (ret) {
+ pr_err("Csr wr fail, ret: %d, node_id: %d, csr addr: 0x%08x\n",
+			       ret, node_id, rd_addr + offset);
+ kfree(data);
+ return ret;
+ }
+ offset += 4;
+ }
+
+ *out_size = 0;
+ kfree(data);
+ return ret;
+}
+
+static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
+{
+ if (mod == HINIC_MOD_L2NIC && cmd == NIC_UP_CMD_UPDATE_FW)
+ return UP_UPDATEFW_TIME_OUT_VAL;
+ else
+ return UP_COMP_TIME_OUT_VAL;
+}
+
+static int send_to_up(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
+{
+ int ret = 0;
+
+ if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) {
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u32 timeout;
+
+ mod = (enum hinic_mod_type)nt_msg->up_cmd.up_db.comm_mod_type;
+ cmd = nt_msg->up_cmd.up_db.chipif_cmd;
+
+ timeout = get_up_timeout_val(mod, cmd);
+ ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd,
+ buf_in, (u16)in_size,
+ buf_out, (u16 *)out_size,
+ timeout);
+ if (ret) {
+ pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n",
+ mod, cmd);
+ return ret;
+ }
+
+ } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
+ if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) {
+ ret = api_csr_write(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ return ret;
+ }
+
+ ret = api_csr_read(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ }
+
+ return ret;
+}
+
+static int sm_rd32(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u32 val1;
+ int ret;
+
+ ret = hinic_sm_ctr_rd32(hwdev, node, instance, id, &val1);
+ if (ret) {
+		pr_err("Get sm ctr information (32 bits) failed!\n");
+ val1 = 0xffffffff;
+ }
+
+ buf_out->val1 = val1;
+
+ return ret;
+}
+
+static int sm_rd64_pair(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u64 val1 = 0, val2 = 0;
+ int ret;
+
+ ret = hinic_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2);
+ if (ret) {
+		pr_err("Get sm ctr information (64 bits pair) failed!\n");
+ val1 = 0xffffffff;
+ }
+
+ buf_out->val1 = val1;
+ buf_out->val2 = val2;
+
+ return ret;
+}
+
+static int sm_rd64(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u64 val1;
+ int ret;
+
+ ret = hinic_sm_ctr_rd64(hwdev, node, instance, id, &val1);
+ if (ret) {
+		pr_err("Get sm ctr information (64 bits) failed!\n");
+ val1 = 0xffffffff;
+ }
+ buf_out->val1 = val1;
+
+ return ret;
+}
+
+typedef int (*sm_module)(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out);
+
+struct sm_module_handle {
+ enum sm_cmd_type smCmdName;
+ sm_module smFunc;
+};
+
+struct sm_module_handle sm_module_cmd_handle[] = {
+ {SM_CTR_RD32, sm_rd32},
+ {SM_CTR_RD64_PAIR, sm_rd64_pair},
+ {SM_CTR_RD64, sm_rd64}
+};
+
+static int send_to_sm(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct sm_in_st *sm_in = buf_in;
+ struct sm_out_st *sm_out = buf_out;
+ u32 msg_formate = nt_msg->msg_formate;
+ int index, num_cmds = sizeof(sm_module_cmd_handle) /
+ sizeof(sm_module_cmd_handle[0]);
+ int ret = 0;
+
+ for (index = 0; index < num_cmds; index++) {
+ if (msg_formate == sm_module_cmd_handle[index].smCmdName)
+ ret = sm_module_cmd_handle[index].smFunc(hwdev,
+ (u32)sm_in->id,
+ (u8)sm_in->instance,
+ (u8)sm_in->node, sm_out);
+ }
+
+ if (ret)
+		pr_err("Get sm information failed!\n");
+
+ *out_size = sizeof(struct sm_out_st);
+
+ return ret;
+}
+
+static bool is_hwdev_cmd_support(unsigned int mod, char *ifname)
+{
+ void *hwdev;
+
+ hwdev = hinic_get_hwdev_by_ifname(ifname);
+ if (!hwdev) {
+		pr_err("Cannot get the device %s correctly\n", ifname);
+ return false;
+ }
+
+ switch (mod) {
+ case SEND_TO_UP:
+ case SEND_TO_SM:
+ if (FUNC_SUPPORT_MGMT(hwdev)) {
+ if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_MGMT_INITED)) {
+				pr_err("MGMT has not been initialized\n");
+ return false;
+ }
+ } else if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_MBOX_INITED)) {
+			pr_err("MBOX has not been initialized\n");
+ return false;
+ }
+
+ if (mod == SEND_TO_SM &&
+ ((hinic_func_type(hwdev) == TYPE_VF) ||
+ (!hinic_is_hwdev_mod_inited(hwdev,
+ HINIC_HWDEV_MGMT_INITED)))) {
+			pr_err("Current function does not support this cmd\n");
+ return false;
+ }
+ break;
+
+ case SEND_TO_UCODE:
+ if (!hinic_is_hwdev_mod_inited(hwdev,
+ HINIC_HWDEV_CMDQ_INITED)) {
+			pr_err("CMDQ has not been initialized\n");
+ return false;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool nictool_k_is_cmd_support(unsigned int mod, char *ifname)
+{
+ enum hinic_init_state init_state =
+ hinic_get_init_state_by_ifname(ifname);
+ bool support = true;
+
+ if (init_state == HINIC_INIT_STATE_NONE)
+ return false;
+
+ if (mod == SEND_TO_NIC_DRIVER) {
+ if (init_state < HINIC_INIT_STATE_NIC_INITED) {
+			pr_err("NIC driver has not been initialized\n");
+ return false;
+ }
+ } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) {
+ return is_hwdev_cmd_support(mod, ifname);
+ } else if ((mod >= HINICADM_OVS_DRIVER &&
+ mod <= HINICADM_FCOE_DRIVER) ||
+ mod == SEND_TO_HW_DRIVER) {
+ if (init_state < HINIC_INIT_STATE_HWDEV_INITED) {
+			pr_err("Hwdev has not been initialized\n");
+ return false;
+ }
+ } else {
+		pr_err("Unsupported mod %d\n", mod);
+ support = false;
+ }
+
+ return support;
+}
+
+static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
+ void **buf_in, u32 out_size, void **buf_out)
+{
+ int ret;
+
+ ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in);
+ if (ret) {
+ pr_err("Alloc tool cmd buff in failed\n");
+ return ret;
+ }
+
+ ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out);
+ if (ret) {
+ pr_err("Alloc tool cmd buff out failed\n");
+ goto out_free_buf_in;
+ }
+
+ return 0;
+
+out_free_buf_in:
+ free_buff_in(hwdev, nt_msg, *buf_in);
+
+ return ret;
+}
+
+static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, void *buf_out)
+{
+ free_buff_out(hwdev, nt_msg, buf_out);
+ free_buff_in(hwdev, nt_msg, buf_in);
+}
+
+static int get_self_test_cmd(struct msg_module *nt_msg)
+{
+ int ret;
+ u32 res = 0;
+ u32 out_size_expect = nt_msg->lenInfo.outBuffLen;
+
+ ret = hinic_get_self_test_result(nt_msg->device_name, &res);
+ if (ret) {
+ pr_err("Get self test result failed!\n");
+ return -EFAULT;
+ }
+
+ ret = copy_buf_out_to_user(nt_msg, out_size_expect, &res);
+ if (ret)
+ pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__);
+
+ return ret;
+}
+
+static int get_all_chip_id_cmd(struct msg_module *nt_msg)
+{
+ int ret = 0;
+ u32 out_size_expect = nt_msg->lenInfo.outBuffLen;
+ struct nic_card_id card_id;
+
+ hinic_get_all_chip_id((void *)&card_id);
+
+ if (copy_to_user(nt_msg->out_buf, &card_id, out_size_expect))
+ pr_err("Copy chip id to user failed\n");
+
+ return ret;
+}
+
+int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ return send_to_nic_driver(uld_dev, cmd, buf_in,
+ in_size, buf_out, out_size);
+}
+
+static void *__get_dev_support_nic_cmd(struct msg_module *nt_msg,
+ enum hinic_service_type type)
+{
+ void *uld_dev = NULL;
+
+ /* set/get qos must use chip_name(hinic0) */
+ switch (nt_msg->msg_formate) {
+ case GET_COS_UP_MAP:
+ case SET_COS_UP_MAP:
+ case GET_NUM_COS:
+ uld_dev = hinic_get_uld_by_chip_name(nt_msg->device_name, type);
+ if (!uld_dev)
+ pr_err("Get/set cos_up must use chip_name(hinic0)\n");
+
+ return uld_dev;
+
+ default:
+ break;
+ }
+
+ uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
+ if (!uld_dev)
+		pr_err("Cannot get the uld dev correctly: %s, nic driver may not be registered\n",
+		       nt_msg->device_name);
+
+ return uld_dev;
+}
+
+static void *get_support_uld_dev(struct msg_module *nt_msg,
+ enum hinic_service_type type)
+{
+	char *service_name[SERVICE_T_MAX] = {"NIC", "OVS", "ROCE", "TOE",
+					     "IWARP", "FC", "FCOE"};
+ void *hwdev = NULL;
+ void *uld_dev = NULL;
+
+ switch (nt_msg->module) {
+ case SEND_TO_NIC_DRIVER:
+ hwdev = hinic_get_hwdev_by_ifname(nt_msg->device_name);
+ if (!hinic_support_nic(hwdev, NULL)) {
+			pr_err("Current function doesn't support NIC\n");
+ return NULL;
+ }
+ return __get_dev_support_nic_cmd(nt_msg, type);
+ default:
+ break;
+ }
+
+ uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
+ if (!uld_dev)
+		pr_err("Cannot get the uld dev correctly: %s, %s driver may not be registered\n",
+		       nt_msg->device_name, service_name[type]);
+
+ return uld_dev;
+}
+
+static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ enum hinic_service_type type;
+ int ret = 0;
+
+ type = nt_msg->module - SEND_TO_SM;
+ *out_size = sizeof(struct drv_version_info);
+
+ if (!g_uld_info[type].ioctl)
+ return ret;
+
+ ret = g_uld_info[type].ioctl(NULL, nt_msg->msg_formate, buf_in, in_size,
+ buf_out, out_size);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(nt_msg->out_buf, buf_out, *out_size))
+ return -EFAULT;
+
+ return ret;
+}
+
+int send_to_service_driver(struct msg_module *nt_msg, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ enum hinic_service_type type;
+ void *uld_dev;
+ int ret = -EINVAL;
+
+ if (nt_msg->module == SEND_TO_NIC_DRIVER)
+ type = SERVICE_T_NIC;
+ else
+ type = nt_msg->module - SEND_TO_SM;
+
+ if (type < SERVICE_T_MAX) {
+ uld_dev = get_support_uld_dev(nt_msg, type);
+ if (!uld_dev)
+ return -EINVAL;
+
+ if (g_uld_info[type].ioctl)
+ ret = g_uld_info[type].ioctl(uld_dev,
+ nt_msg->msg_formate,
+ buf_in, in_size, buf_out,
+ out_size);
+ } else {
+		pr_err("Ioctl input module id: %d is incorrect\n",
+ nt_msg->module);
+ }
+
+ return ret;
+}
+
+static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int ret;
+
+ switch (nt_msg->module) {
+ case SEND_TO_HW_DRIVER:
+ ret = send_to_hw_driver(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_UP:
+ ret = send_to_up(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_UCODE:
+ ret = send_to_ucode(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_SM:
+ ret = send_to_sm(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ default:
+ ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out,
+ out_size);
+ break;
+ }
+
+ return ret;
+}
+
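+/* A few commands are served even when no device has probed successfully;
+ * they are short-circuited here before any hwdev lookup.
+ */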
+static bool hinic_is_special_handling_cmd(struct msg_module *nt_msg, int *ret)
+{
+ unsigned int cmd_raw = nt_msg->module;
+
+	/* Get self test result directly, whether or not driver probe succeeded */
+ if (cmd_raw == SEND_TO_HW_DRIVER &&
+ nt_msg->msg_formate == GET_SELF_TEST_RES) {
+ *ret = get_self_test_cmd(nt_msg);
+ return true;
+ }
+
+ if (cmd_raw == SEND_TO_HW_DRIVER &&
+ nt_msg->msg_formate == GET_CHIP_ID) {
+ *ret = get_all_chip_id_cmd(nt_msg);
+ return true;
+ }
+
+ if (cmd_raw == SEND_TO_HW_DRIVER &&
+ nt_msg->msg_formate == GET_PF_DEV_INFO) {
+ *ret = get_pf_dev_info(nt_msg->device_name, nt_msg);
+ return true;
+ }
+
+ if (cmd_raw == SEND_TO_HW_DRIVER &&
+ nt_msg->msg_formate == CMD_FREE_MEM) {
+ *ret = knl_free_mem(nt_msg->device_name, nt_msg);
+ return true;
+ }
+
+ if (cmd_raw == SEND_TO_HW_DRIVER &&
+ nt_msg->msg_formate == GET_CHIP_INFO) {
+ *ret = get_card_func_info(nt_msg->device_name, nt_msg);
+ return true;
+ }
+
+ return false;
+}
+
+static long nictool_k_unlocked_ioctl(struct file *pfile,
+ unsigned int cmd, unsigned long arg)
+{
+ void *hwdev;
+ struct msg_module nt_msg;
+ void *buf_out = NULL;
+ void *buf_in = NULL;
+ u32 out_size_expect = 0;
+ u32 out_size = 0;
+ u32 in_size = 0;
+ unsigned int cmd_raw = 0;
+ int ret = 0;
+
+ memset(&nt_msg, 0, sizeof(nt_msg));
+
+ if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) {
+ pr_err("Copy information from user failed\n");
+ return -EFAULT;
+ }
+
+ cmd_raw = nt_msg.module;
+
+ out_size_expect = nt_msg.lenInfo.outBuffLen;
+ in_size = nt_msg.lenInfo.inBuffLen;
+
+ hinic_tool_cnt_inc();
+
+ if (hinic_is_special_handling_cmd(&nt_msg, &ret))
+ goto out_free_lock;
+
+ if (cmd_raw == HINICADM_FC_DRIVER &&
+ nt_msg.msg_formate == GET_CHIP_ID)
+ get_fc_devname(nt_msg.device_name);
+
+ if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name)) {
+ ret = -EFAULT;
+ goto out_free_lock;
+ }
+
+	/* get the hwdev by interface name */
+ hwdev = hinic_get_hwdev_by_ifname(nt_msg.device_name);
+ if (!hwdev) {
+		pr_err("Cannot get the device %s correctly\n",
+ nt_msg.device_name);
+ ret = -ENODEV;
+ goto out_free_lock;
+ }
+
+ ret = alloc_tmp_buf(hwdev, &nt_msg, in_size,
+ &buf_in, out_size_expect, &buf_out);
+ if (ret) {
+ pr_err("Alloc tmp buff failed\n");
+ goto out_free_lock;
+ }
+
+ out_size = out_size_expect;
+
+ if (nt_msg.msg_formate == GET_DRV_VERSION &&
+ (cmd_raw == HINICADM_FC_DRIVER || cmd_raw == HINICADM_TOE_DRIVER)) {
+ ret = get_service_drv_version(hwdev, &nt_msg, buf_in,
+ in_size, buf_out, &out_size);
+ goto out_free_buf;
+ }
+
+ ret = nictool_exec_cmd(hwdev, &nt_msg, buf_in,
+ in_size, buf_out, &out_size);
+ if (ret)
+ goto out_free_buf;
+
+ ret = copy_buf_out_to_user(&nt_msg, out_size_expect, buf_out);
+ if (ret)
+ pr_err("Copy information to user failed\n");
+
+out_free_buf:
+ free_tmp_buf(hwdev, &nt_msg, buf_in, buf_out);
+
+out_free_lock:
+ hinic_tool_cnt_dec();
+
+ return (long)ret;
+}
+
+static int nictool_k_open(struct inode *pnode, struct file *pfile)
+{
+ return 0;
+}
+
+static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf,
+ size_t size, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf,
+ size_t size, loff_t *ppos)
+{
+ return 0;
+}
+
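+/* Only unlocked_ioctl and mmap do real work for the nictool char device;
+ * open/read/write are stubs.
+ */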
+static const struct file_operations fifo_operations = {
+ .owner = THIS_MODULE,
+ .open = nictool_k_open,
+ .read = nictool_k_read,
+ .write = nictool_k_write,
+ .unlocked_ioctl = nictool_k_unlocked_ioctl,
+ .mmap = hinic_mem_mmap,
+};
+
+int if_nictool_exist(void)
+{
+ struct file *fp = NULL;
+ int exist = 0;
+
+ fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ exist = 0;
+ } else {
+ (void)filp_close(fp, NULL);
+ exist = 1;
+ }
+
+ return exist;
+}
+
+/**
+ * nictool_k_init - register the nictool char device interface
+ */
+int nictool_k_init(void)
+{
+ int ret;
+ struct device *pdevice;
+
+ if (g_nictool_init_flag) {
+ g_nictool_ref_cnt++;
+ /* already initialized */
+ return 0;
+ }
+
+ if (if_nictool_exist()) {
+ pr_err("Nictool device exists\n");
+ return 0;
+ }
+
+	/* Device ID: major device number (12 bits) |
+	 * minor device number (20 bits)
+	 */
+ g_dev_id = MKDEV(MAJOR_DEV_NUM, 0);
+
+	/* Try a static device number first; fall back to dynamic allocation */
+ ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME);
+ if (ret < 0) {
+ ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME);
+ if (ret < 0) {
+ pr_err("Register nictool_dev fail(0x%x)\n", ret);
+ return ret;
+ }
+ }
+
+	/* Create the device class */
+ /*lint -save -e160*/
+ g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS);
+ /*lint -restore*/
+ if (IS_ERR(g_nictool_class)) {
+ pr_err("Create nictool_class fail\n");
+ ret = -EFAULT;
+ goto class_create_err;
+ }
+
+ /* Initializing the character device */
+ cdev_init(&g_nictool_cdev, &fifo_operations);
+
+	/* Add the character device to the system */
+ ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
+ if (ret < 0) {
+ pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret);
+ goto cdev_add_err;
+ }
+
+ /* Export device information to user space
+ * (/sys/class/class name/device name)
+ */
+ pdevice = device_create(g_nictool_class, NULL,
+ g_dev_id, NULL, HIADM_DEV_NAME);
+ if (IS_ERR(pdevice)) {
+ pr_err("Export nictool device information to user space fail\n");
+ goto device_create_err;
+ }
+
+ g_nictool_init_flag = 1;
+ g_nictool_ref_cnt = 1;
+
+	pr_info("Register nictool_dev to system succeeded\n");
+
+ return 0;
+
+device_create_err:
+ cdev_del(&g_nictool_cdev);
+
+cdev_add_err:
+ class_destroy(g_nictool_class);
+
+class_create_err:
+ g_nictool_class = NULL;
+ unregister_chrdev_region(g_dev_id, 1);
+
+ return ret;
+}
+
+void nictool_k_uninit(void)
+{
+ if (g_nictool_init_flag) {
+ if ((--g_nictool_ref_cnt))
+ return;
+ }
+
+ g_nictool_init_flag = 0;
+
+ if (!g_nictool_class || IS_ERR(g_nictool_class))
+ return;
+
+ cdev_del(&g_nictool_cdev);
+ device_destroy(g_nictool_class, g_dev_id);
+ class_destroy(g_nictool_class);
+ g_nictool_class = NULL;
+
+ unregister_chrdev_region(g_dev_id, 1);
+
+	pr_info("Unregister nictool_dev succeeded\n");
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
new file mode 100644
index 000000000000..b894bf9abecb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NICTOOL_H_
+#define HINIC_NICTOOL_H_
+
+#include "hinic_dfx_def.h"
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+/* completion timeout interval, unit is jiffies */
+#define UP_COMP_TIME_OUT_VAL 10000U
+
+struct sm_in_st {
+ int node;
+ int id;
+ int instance;
+};
+
+struct sm_out_st {
+ u64 val1;
+ u64 val2;
+};
+
+struct up_log_msg_st {
+ u32 rd_len;
+ u32 addr;
+};
+
+struct csr_write_st {
+ u32 rd_len;
+ u32 addr;
+ u8 *data;
+};
+
+struct ipsurx_stats_info {
+ u32 addr;
+ u32 rd_cnt;
+};
+
+struct ucode_cmd_st {
+ union {
+ struct {
+ u32 comm_mod_type:8;
+ u32 ucode_cmd_type:4;
+ u32 cmdq_ack_type:3;
+ u32 ucode_imm:1;
+ u32 len:16;
+ } ucode_db;
+ u32 value;
+ };
+};
+
+struct up_cmd_st {
+ union {
+ struct {
+ u32 comm_mod_type:8;
+ u32 chipif_cmd:8;
+ u32 up_api_type:16;
+ } up_db;
+ u32 value;
+ };
+};
+
+#define API_CMD 0x1
+#define API_CHAIN 0x2
+
+struct msg_module {
+ char device_name[IFNAMSIZ];
+ unsigned int module;
+ union {
+ u32 msg_formate;
+ struct ucode_cmd_st ucode_cmd;
+ struct up_cmd_st up_cmd;
+ };
+
+ struct {
+ u32 inBuffLen;
+ u32 outBuffLen;
+ } lenInfo;
+ u32 res;
+ void *in_buff;
+ void *out_buf;
+};
+
+#define MAX_VER_INFO_LEN 128
+struct drv_version_info {
+ char ver[MAX_VER_INFO_LEN];
+};
+
+struct chip_fault_stats {
+ int offset;
+ u8 chip_faults[MAX_DRV_BUF_SIZE];
+};
+
+struct hinic_wqe_info {
+ int q_id;
+ void *slq_handle;
+ unsigned int wqe_id;
+};
+
+struct hinic_cos_up_map {
+ u8 cos_up[HINIC_DCB_UP_MAX];
+ u8 num_cos;
+};
+
+struct hinic_tx_hw_page {
+ u64 phy_addr;
+ u64 *map_addr;
+};
+
+struct hinic_dbg_sq_info {
+ u16 q_id;
+ u16 pi;
+	u16 ci; /* sw_ci */
+	u16 fi; /* hw_ci */
+
+	u32 q_depth;
+	u16 pi_reverse; /* TODO: what is this? */
+ u16 weqbb_size;
+
+ u8 priority;
+ u16 *ci_addr;
+ u64 cla_addr;
+
+ void *slq_handle;
+
+ /* TODO: NIC don't use direct wqe */
+ struct hinic_tx_hw_page direct_wqe;
+ struct hinic_tx_hw_page db_addr;
+ u32 pg_idx;
+
+ u32 glb_sq_id;
+};
+
+struct hinic_dbg_rq_info {
+ u16 q_id;
+ u16 glb_rq_id;
+ u16 hw_pi;
+ u16 ci; /* sw_ci */
+ u16 sw_pi;
+ u16 wqebb_size;
+ u16 q_depth;
+ u16 buf_len;
+
+ void *slq_handle;
+ u64 ci_wqe_page_addr;
+ u64 ci_cla_tbl_addr;
+
+ u16 msix_idx;
+ u32 msix_vector;
+};
+
+#ifndef BUSINFO_LEN
+#define BUSINFO_LEN (32)
+#endif
+struct pf_info {
+ char name[IFNAMSIZ];
+ char bus_info[BUSINFO_LEN];
+ u32 pf_type;
+};
+
+#ifndef MAX_SIZE
+#define MAX_SIZE (16)
+#endif
+struct card_info {
+ struct pf_info pf[MAX_SIZE];
+ u32 pf_num;
+};
+
+struct nic_card_id {
+ u32 id[MAX_SIZE];
+ u32 num;
+};
+
+struct func_pdev_info {
+ u64 bar0_phy_addr;
+ u64 bar0_size;
+ u64 rsvd1[4];
+};
+
+struct hinic_card_func_info {
+ u32 num_pf;
+ u32 rsvd0;
+ u64 usr_api_phy_addr;
+ struct func_pdev_info pdev_info[MAX_SIZE];
+};
+
+#ifndef NIC_UP_CMD_UPDATE_FW
+#define NIC_UP_CMD_UPDATE_FW (114)
+#endif
+
+#ifndef MAX_CARD_NUM
+#define MAX_CARD_NUM (64)
+#endif
+extern void *g_card_node_array[MAX_CARD_NUM];
+extern void *g_card_vir_addr[MAX_CARD_NUM];
+extern u64 g_card_phy_addr[MAX_CARD_NUM];
+extern int card_id;
+
+struct hinic_nic_loop_mode {
+ u32 loop_mode;
+ u32 loop_ctrl;
+};
+
+struct hinic_nic_poll_weight {
+ int poll_weight;
+};
+
+enum hinic_homologues_state {
+ HINIC_HOMOLOGUES_OFF = 0,
+ HINIC_HOMOLOGUES_ON = 1,
+};
+
+struct hinic_homologues {
+ enum hinic_homologues_state homo_state;
+};
+
+struct hinic_pf_info {
+ u32 isvalid;
+ u32 pf_id;
+};
+
+int nictool_k_init(void);
+void nictool_k_uninit(void);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
new file mode 100644
index 000000000000..f45e54be9c3c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_PCI_ID_TBL_H
+#define HINIC_PCI_ID_TBL_H
+
+#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define HINIC_DEV_ID_1822_PF 0x1822
+#define HINIC_DEV_ID_1822_VF 0x375E
+#define HINIC_DEV_ID_1822_VF_HV 0x379E
+#define HINIC_DEV_ID_1822_SMTIO 0x020B
+#define HINIC_DEV_ID_1822_PANGEA_100GE 0x0208
+#define HINIC_DEV_ID_1822_PANGEA_TP_10GE 0x0204
+#define HINIC_DEV_ID_1822_KR_40GE 0x020D
+#define HINIC_DEV_ID_1822_KR_100GE 0x0205
+#define HINIC_DEV_ID_1822_KR_25GE 0x0210
+#define HINIC_DEV_ID_1822_MULTI_HOST 0x0211
+#define HINIC_DEV_ID_1822_100GE 0x0200
+#define HINIC_DEV_ID_1822_100GE_MULTI_HOST 0x0201
+
+#define HIFC_DEV_ID_1822_8G 0x0212
+#define HIFC_DEV_ID_1822_16G 0x0203
+#define HIFC_DEV_ID_1822_32G 0x0202
+
+#define HIFC_DEV_ID_1822_SMTIO 0x020C
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.c
b/drivers/net/ethernet/huawei/hinic/hinic_qp.c
new file mode 100644
index 000000000000..c61df2f96283
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hinic_nic_io.h"
+#include "hinic_qp.h"
+
+#define BUF_DESC_SHIFT 1
+#define BUF_DESC_SIZE(nr_descs) (((u32)nr_descs) << BUF_DESC_SHIFT)
+
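+/* Build the SQ WQE control section: section lengths are encoded in 8-byte
+ * units, the MSS field in queue_info is clamped to at least TX_MSS_MIN
+ * (defaulting to TX_MSS_DEFAULT when unset), and both words are converted
+ * to the byte order expected by the hardware.
+ */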
+void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
+ int nr_descs, u8 owner)
+{
+ u32 ctrl_size, task_size, bufdesc_size;
+
+ ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
+ task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
+ bufdesc_size = BUF_DESC_SIZE(nr_descs);
+
+ ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
+ SQ_CTRL_SET(task_size, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(ctrl_size, LEN) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ ctrl->ctrl_fmt = be32_to_cpu(ctrl->ctrl_fmt);
+
+ ctrl->queue_info = queue_info;
+ ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) {
+ ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
+ } else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) {
+		/* MSS must not be less than TX_MSS_MIN (80) */
+ ctrl->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info,
+ MSS);
+ ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
+ }
+ ctrl->queue_info = be32_to_cpu(ctrl->queue_info);
+}
+
+int hinic_get_rx_done(struct hinic_rq_cqe *cqe)
+{
+ u32 status;
+ int rx_done;
+
+ status = be32_to_cpu(cqe->status);
+
+ rx_done = RQ_CQE_STATUS_GET(status, RXDONE);
+ if (!rx_done)
+ return 0;
+
+ return 1;
+}
+
+void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old)
+{
+ u32 status;
+
+ status = RQ_CQE_STATUS_CLEAR(status_old, RXDONE);
+
+ cqe->status = cpu_to_be32(status);
+
+ /* Make sure Rxdone has been set */
+ wmb();
+}
+
+int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe)
+{
+ u32 pkt_info;
+ int super_cqe_en;
+
+ pkt_info = be32_to_cpu(cqe->pkt_info);
+
+ super_cqe_en = RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN);
+ if (!super_cqe_en)
+ return 0;
+
+ return 1;
+}
+
+u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe)
+{
+ u32 vlan_len = be32_to_cpu(cqe->vlan_len);
+
+ return RQ_CQE_SGE_GET(vlan_len, LEN);
+}
+
+u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe)
+{
+ u32 pkt_num = be32_to_cpu(cqe->pkt_info);
+
+ return RQ_CQE_PKT_NUM_GET(pkt_num, NUM);
+}
+
+u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe,
+ bool last)
+{
+ u32 pkt_len = be32_to_cpu(cqe->pkt_info);
+
+ if (!last)
+ return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN);
+ else
+ return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN);
+}
+
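+/* Fill one RQ WQE: the control word encodes the section lengths, the CQE
+ * section points at this entry's completion entry and the buffer descriptor
+ * carries the DMA address of the receive buffer.
+ */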
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+ dma_addr_t cqe_dma)
+{
+ struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;
+ struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+ struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+ struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+ u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
+
+ ctrl->ctrl_fmt =
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
+ RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+ hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
+
+ buf_desc->addr_high = upper_32_bits(buf_addr);
+ buf_desc->addr_low = lower_32_bits(buf_addr);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum sq_l4offload_type l4_offload,
+ u32 l4_len, u32 offset)
+{
+ u32 tcp_udp_cs = 0, sctp = 0;
+ u32 mss = TX_MSS_DEFAULT;
+
+	/* tcp_udp_cs must be set so that the outer checksum is calculated
+	 * for VXLAN packets that carry no inner L3/L4 headers
+	 */
+ if (unlikely(l4_offload == SCTP_OFFLOAD_ENABLE))
+ sctp = 1;
+ else
+ tcp_udp_cs = 1;
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+ task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+ SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
+ SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
+
+ *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
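+/* Enable TSO (TCP) or UFO (UDP) for this send WQE and force hardware
+ * checksum calculation, since the checksums of segmented packets can only
+ * be computed by hardware.
+ */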
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum sq_l4offload_type l4_offload,
+ u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss)
+{
+ u32 tso = 0, ufo = 0;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE)
+ tso = 1;
+ else if (l4_offload == UDP_OFFLOAD_ENABLE)
+ ufo = 1;
+
+ task->ufo_v6_identify = be32_to_cpu(ip_ident);
+ /* just keep the same code style here */
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(tso || ufo, TSO_UFO);
+ task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+ SQ_CTRL_QUEUE_INFO_SET(tso, TSO) |
+ SQ_CTRL_QUEUE_INFO_SET(ufo, UFO) |
+ SQ_CTRL_QUEUE_INFO_SET(!!l4_offload, TCPUDP_CS);
+	/* checksum must be calculated by hardware when TSO is enabled */
+
+ *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+ /* qsf was initialized in prepare_sq_wqe */
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
+ u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri)
+{
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
+}
+
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len)
+{
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.h
b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
new file mode 100644
index 000000000000..8789272b4fcb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_QP_H
+#define HINIC_QP_H
+
+#include "hinic_qe_def.h"
+#include "hinic_port_cmd.h"
+
+/* frags and the linear part */
+#define HINIC_MAX_SQ_BUFDESCS (MAX_SKB_FRAGS + 1)
+#define HINIC_MAX_SQ_SGE 17
+#define HINIC_MAX_SKB_NR_FRAGE (HINIC_MAX_SQ_SGE - 1)
+#define HINIC_GSO_MAX_SIZE 65536
+
+struct hinic_sq_ctrl {
+ u32 ctrl_fmt;
+ u32 queue_info;
+};
+
+struct hinic_sq_task {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 ufo_v6_identify;
+ u32 pkt_info4;
+ u32 rsvd5;
+};
+
+struct hinic_sq_bufdesc {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+ u32 rsvd;
+};
+
+struct hinic_sq_wqe {
+ struct hinic_sq_ctrl ctrl;
+ struct hinic_sq_task task;
+ struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
+};
+
+struct hinic_rq_ctrl {
+ u32 ctrl_fmt;
+};
+
+struct hinic_rq_cqe {
+ u32 status;
+ u32 vlan_len;
+
+ u32 offload_type;
+ u32 hash_val;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 rsvd6;
+ u32 pkt_info;
+};
+
+struct hinic_rq_cqe_sect {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_rq_bufdesc {
+ u32 addr_high;
+ u32 addr_low;
+};
+
+struct hinic_rq_wqe {
+ struct hinic_rq_ctrl ctrl;
+ u32 rsvd;
+ struct hinic_rq_cqe_sect cqe_sect;
+ struct hinic_rq_bufdesc buf_desc;
+};
+
+void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
+ int nr_descs, u8 owner);
+
+u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe);
+
+int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe);
+
+u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, bool last);
+
+u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe);
+
+int hinic_get_rx_done(struct hinic_rq_cqe *cqe);
+
+void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old);
+
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+ dma_addr_t cqe_dma);
+
+#ifdef static
+#undef static
+#define LLT_STATIC_DEF_SAVED
+#endif
+static inline void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum sq_l3_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info2 |= SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+ SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+static inline void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum sq_tunnel_l4_type l4_type,
+ u32 tunnel_len)
+{
+ task->pkt_info2 |= SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+ SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+static inline void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum sq_l3_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+ task->pkt_info1 |= SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+#ifdef LLT_STATIC_DEF_SAVED
+#define static
+#undef LLT_STATIC_DEF_SAVED
+#endif
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum sq_l4offload_type l4_offload,
+ u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum sq_l4offload_type l4_offload, u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss);
+
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri);
+
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 8b0d6a6631cc..38f0a0e86eda 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -13,57 +13,369 @@
*
*/
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include <linux/cpumask.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_qp.h"
#include "hinic_rx.h"
-#include "hinic_dev.h"
-#define RX_IRQ_NO_PENDING 0
-#define RX_IRQ_NO_COALESC 0
-#define RX_IRQ_NO_LLI_TIMER 0
-#define RX_IRQ_NO_CREDIT 0
-#define RX_IRQ_NO_RESEND_TIMER 0
+static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev);
+
+#define HINIC_RX_HDR_SIZE 256
+#define HINIC_RX_BUFFER_WRITE 16
+#define HINIC_RX_IPV6_PKT 7
+#define HINIC_RX_VXLAN_PKT 0xb
-/**
- * hinic_rxq_clean_stats - Clean the statistics of specific queue
- * @rxq: Logical Rx Queue
- **/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+#define RXQ_STATS_INC(rxq, field) \
+{ \
+ u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \
+ (rxq)->rxq_stats.field++; \
+ u64_stats_update_end(&(rxq)->rxq_stats.syncp); \
+}
+
+static bool rx_alloc_mapped_page(struct hinic_rxq *rxq,
+ struct hinic_rx_info *rx_info)
{
- struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+
+ struct page *page = rx_info->page;
+ dma_addr_t dma = rx_info->buf_dma_addr;
+
+ if (likely(dma))
+ return true;
+
+ /* alloc new page for storage */
+ page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COLD |
+ __GFP_COMP, nic_dev->page_order);
+ if (unlikely(!page)) {
+ nicif_err(nic_dev, drv, netdev, "Alloc rxq: %d page failed\n",
+ rxq->q_id);
+ return false;
+ }
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->pkts = 0;
- rxq_stats->bytes = 0;
- u64_stats_update_end(&rxq_stats->syncp);
+ /* map page for use */
+ dma = dma_map_page(&pdev->dev, page, 0, rxq->dma_rx_buff_size,
+ DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (unlikely(dma_mapping_error(&pdev->dev, dma))) {
+ nicif_err(nic_dev, drv, netdev, "Failed to map page to rx buffer\n");
+ __free_pages(page, nic_dev->page_order);
+ return false;
+ }
+
+ rx_info->page = page;
+ rx_info->buf_dma_addr = dma;
+ rx_info->page_offset = 0;
+
+ return true;
+}
+
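+/* Pre-build one RQ WQE per ring entry and link it to its CQE DMA address;
+ * the receive buffer addresses are filled in later by
+ * hinic_rx_fill_buffers().
+ */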
+static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_rx_info *rx_info;
+ dma_addr_t dma_addr = 0;
+ u16 pi = 0;
+ int rq_wqe_len;
+ int i;
+
+ for (i = 0; i < rxq->q_depth; i++) {
+ rx_info = &rxq->rx_info[i];
+
+ rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
+ if (!rq_wqe) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get rq wqe, rxq id: %d, wqe id:
%d\n",
+ rxq->q_id, i);
+ break;
+ }
+
+ hinic_prepare_rq_wqe(rq_wqe, pi, dma_addr, rx_info->cqe_dma);
+
+ rq_wqe_len = sizeof(struct hinic_rq_wqe);
+ hinic_cpu_to_be32(rq_wqe, rq_wqe_len);
+ rx_info->rq_wqe = rq_wqe;
+ }
+
+ hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, rxq->q_depth);
+
+ return i;
+}
+
+static int hinic_rx_fill_buffers(struct hinic_rxq *rxq)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int i;
+ int free_wqebbs = rxq->delta - 1;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ if (unlikely(!rx_alloc_mapped_page(rxq, rx_info)))
+ break;
+
+ dma_addr = rx_info->buf_dma_addr + rx_info->page_offset;
+
+ rq_wqe = rx_info->rq_wqe;
+
+ rq_wqe->buf_desc.addr_high =
+ cpu_to_be32(upper_32_bits(dma_addr));
+ rq_wqe->buf_desc.addr_low =
+ cpu_to_be32(lower_32_bits(dma_addr));
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+		/* Write all the wqes before pi update */
+		wmb();
+
+ hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id,
+ rxq->next_to_update);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ } else {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate rx buffers, rxq id:
%d\n",
+ rxq->q_id);
+ }
+
+ return i;
+}
+
+void hinic_rx_free_buffers(struct hinic_rxq *rxq)
+{
+ u16 i;
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_rx_info *rx_info;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rxq->q_depth; i++) {
+ rx_info = &rxq->rx_info[i];
+
+ if (rx_info->buf_dma_addr) {
+ dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
+ rxq->dma_rx_buff_size,
+ DMA_FROM_DEVICE);
+ rx_info->buf_dma_addr = 0;
+ }
+
+ if (rx_info->page) {
+ __free_pages(rx_info->page, nic_dev->page_order);
+ rx_info->page = NULL;
+ }
+ }
}
-/**
- * hinic_rxq_get_stats - get statistics of Rx Queue
- * @rxq: Logical Rx Queue
- * @stats: return updated stats here
- **/
-void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
+static void hinic_reuse_rx_page(struct hinic_rxq *rxq,
+ struct hinic_rx_info *old_rx_info)
+{
+ struct hinic_rx_info *new_rx_info;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rx_info = &rxq->rx_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0;
+
+ new_rx_info->page = old_rx_info->page;
+ new_rx_info->page_offset = old_rx_info->page_offset;
+ new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr,
+ new_rx_info->page_offset,
+ rxq->buf_len,
+ DMA_FROM_DEVICE);
+}
+
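+/* Attach one receive buffer to the skb: small packets are copied into the
+ * linear area, larger ones are added as a page fragment. Returns true when
+ * the page can be recycled, i.e. it belongs to the local NUMA node and (on
+ * the fragment path) we hold the only reference, in which case the
+ * half-page offset is flipped for the next use.
+ */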
+static bool hinic_add_rx_frag(struct hinic_rxq *rxq,
+ struct hinic_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ dma_sync_single_range_for_cpu(rxq->dev,
+ rx_info->buf_dma_addr,
+ rx_info->page_offset,
+ rxq->buf_len,
+ DMA_FROM_DEVICE);
+
+ if (size <= HINIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long))); /*lint !e666*/
+
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (int)rx_info->page_offset, (int)size, rxq->buf_len);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_info->page_offset ^= rxq->buf_len;
+
+#ifdef HAVE_PAGE_COUNT
+ atomic_add(1, &page->_count);
+#else
+ page_ref_inc(page);
+#endif
+
+ return true;
+}
+
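+/* Walk the sge_num consumed ring entries and attach each buffer to the skb
+ * chain, moving on to the next frag_list skb whenever MAX_SKB_FRAGS
+ * fragments have been used; recycled pages go back to the ring, the rest
+ * are unmapped.
+ */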
+static void __packaging_skb(struct hinic_rxq *rxq, struct sk_buff *head_skb,
+ u8 sge_num, u32 pkt_len)
+{
+ struct hinic_rx_info *rx_info;
+ struct sk_buff *skb;
+ u8 frag_num = 0;
+ u32 size;
+ u16 sw_ci;
+
+ sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
+ skb = head_skb;
+ while (sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ pkt_len -= rxq->buf_len;
+ } else {
+ size = pkt_len;
+ }
+
+ if (unlikely(frag_num == MAX_SKB_FRAGS)) {
+ frag_num = 0;
+ if (skb == head_skb)
+ skb = skb_shinfo(skb)->frag_list;
+ else
+ skb = skb->next;
+ }
+
+ if (unlikely(skb != head_skb)) {
+ head_skb->len += size;
+ head_skb->data_len += size;
+ head_skb->truesize += rxq->buf_len;
+ }
+
+ if (likely(hinic_add_rx_frag(rxq, rx_info, skb, size))) {
+ hinic_reuse_rx_page(rxq, rx_info);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
+ rxq->dma_rx_buff_size, DMA_FROM_DEVICE);
+ }
+ /* clear contents of buffer_info */
+ rx_info->buf_dma_addr = 0;
+ rx_info->page = NULL;
+ sge_num--;
+ frag_num++;
+ }
+}
+
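+/* Assemble an skb for a packet of pkt_len bytes: compute how many SGEs
+ * (ring buffers) it spans and, when that exceeds MAX_SKB_FRAGS, allocate
+ * additional skbs chained through the frag_list before attaching the
+ * buffers.
+ */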
+static struct sk_buff *hinic_fetch_rx_buffer(struct hinic_rxq *rxq, u32 pkt_len)
+{
+ struct sk_buff *head_skb, *cur_skb, *skb = NULL;
+ struct net_device *netdev = rxq->netdev;
+ u8 sge_num, skb_num;
+ u16 wqebb_cnt = 0;
+
+ head_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
+ if (unlikely(!head_skb))
+ return NULL;
+
+ sge_num = (u8)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ if (likely(sge_num <= MAX_SKB_FRAGS))
+ skb_num = 1;
+ else
+ skb_num = (sge_num / MAX_SKB_FRAGS) +
+ ((sge_num % MAX_SKB_FRAGS) ? 1 : 0);
+
+ while (unlikely(skb_num > 1)) {
+ cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
+ if (unlikely(!cur_skb))
+ goto alloc_skb_fail;
+
+ if (!skb) {
+ skb_shinfo(head_skb)->frag_list = cur_skb;
+ skb = cur_skb;
+ } else {
+ skb->next = cur_skb;
+ skb = cur_skb;
+ }
+
+ skb_num--;
+ }
+
+ prefetchw(head_skb->data);
+ wqebb_cnt = sge_num;
+
+ __packaging_skb(rxq, head_skb, sge_num, pkt_len);
+
+ rxq->cons_idx += wqebb_cnt;
+ rxq->delta += wqebb_cnt;
+
+ return head_skb;
+
+alloc_skb_fail:
+ dev_kfree_skb_any(head_skb);
+ return NULL;
+}
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq,
+ struct hinic_rxq_stats *stats)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
@@ -71,439 +383,849 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
- stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
+ stats->packets = rxq_stats->packets;
+ stats->errors = rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ stats->csum_errors = rxq_stats->csum_errors;
+ stats->other_errors = rxq_stats->other_errors;
+ stats->unlock_bp = rxq_stats->unlock_bp;
+ stats->dropped = rxq_stats->dropped;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
-/**
- * rxq_stats_init - Initialize the statistics of specific queue
- * @rxq: Logical Rx Queue
- **/
+void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats)
+{
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->bytes = 0;
+ rxq_stats->packets = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->unlock_bp = 0;
+ rxq_stats->other_errors = 0;
+ rxq_stats->dropped = 0;
+
+ rxq_stats->alloc_skb_err = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
+}
+
static void rxq_stats_init(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
u64_stats_init(&rxq_stats->syncp);
- hinic_rxq_clean_stats(rxq);
+ hinic_rxq_clean_stats(rxq_stats);
}
-/**
- * rx_alloc_skb - allocate skb and map it to dma address
- * @rxq: rx queue
- * @dma_addr: returned dma address for the skb
- *
- * Return skb
- **/
-static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
- dma_addr_t *dma_addr)
-{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct sk_buff *skb;
- dma_addr_t addr;
- int err;
+static void hinic_pull_tail(struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, HINIC_RX_HDR_SIZE);
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, HINIC_RX_HDR_SIZE);
+ frag->page_offset += HINIC_RX_HDR_SIZE;
+ skb->data_len -= HINIC_RX_HDR_SIZE;
+ skb->tail += HINIC_RX_HDR_SIZE;
+}
- skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
- if (!skb) {
- netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
- return NULL;
- }
+static void hinic_rx_csum(struct hinic_rxq *rxq, u32 status,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ u32 csum_err;
- addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
- DMA_FROM_DEVICE);
- err = dma_mapping_error(&pdev->dev, addr);
- if (err) {
- dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
- goto err_rx_map;
+ csum_err = HINIC_GET_RX_CSUM_ERR(status);
+ if (unlikely(csum_err == HINIC_RX_CSUM_IPSU_OTHER_ERR))
+ rxq->rxq_stats.other_errors++;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (!csum_err) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+		/* packet type was recognized by hardware but its checksum failed */
+ if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
+ HINIC_RX_CSUM_IPSU_OTHER_ERR)))
+ rxq->rxq_stats.csum_errors++;
+
+ skb->ip_summed = CHECKSUM_NONE;
}
+ }
- *dma_addr = addr;
- return skb;
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ bool l2_tunnel;
-err_rx_map:
- dev_kfree_skb_any(skb);
- return NULL;
+ if (!(netdev->features & NETIF_F_GRO))
+ return;
+
+ l2_tunnel = HINIC_GET_RX_PKT_TYPE(offload_type) == HINIC_RX_VXLAN_PKT ?
+ 1 : 0;
+
+ if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* If we checked the outer header let the stack know */
+ skb->csum_level = 1;
}
+#endif /* HAVE_SKBUFF_CSUM_LEVEL */
-/**
- * rx_unmap_skb - unmap the dma address of the skb
- * @rxq: rx queue
- * @dma_addr: dma address of the skb
- **/
-static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
-{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
+#define HINIC_RX_BP_THD 128
- dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
- DMA_FROM_DEVICE);
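+
+/* Back-pressure handling: once enough WQEBBs are held by the host and the
+ * per-queue BP counter reaches HINIC_RX_BP_THD (or force_en is set),
+ * re-enable the ingress queue through the management channel.
+ */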
+static void hinic_unlock_bp(struct hinic_rxq *rxq, bool bp_en, bool force_en)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int free_wqebbs, err;
+
+ if (bp_en)
+ set_bit(HINIC_RX_STATUS_BP_EN, &rxq->status);
+
+ free_wqebbs = rxq->delta - 1;
+ if (test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status) &&
+ (nic_dev->rq_depth - free_wqebbs) >= nic_dev->bp_upper_thd &&
+ (rxq->bp_cnt >= HINIC_RX_BP_THD || force_en)) {
+ err = hinic_set_iq_enable_mgmt(nic_dev->hwdev, rxq->q_id,
+ nic_dev->bp_lower_thd,
+ rxq->next_to_update);
+ if (!err) {
+ clear_bit(HINIC_RX_STATUS_BP_EN, &rxq->status);
+ rxq->bp_cnt = 0;
+ rxq->rxq_stats.unlock_bp++;
+ } else {
+ nicif_err(nic_dev, drv, netdev, "Failed to set iq enable\n");
+ }
+ }
}
-/**
- * rx_free_skb - unmap and free skb
- * @rxq: rx queue
- * @skb: skb to free
- * @dma_addr: dma address of the skb
- **/
-static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
- dma_addr_t dma_addr)
+static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev,
+ struct sk_buff *skb)
{
- rx_unmap_skb(rxq, dma_addr);
- dev_kfree_skb_any(skb);
+ struct net_device *netdev = nic_dev->netdev;
+ u8 *lb_buf = nic_dev->lb_test_rx_buf;
+ void *frag_data;
+ int lb_len = nic_dev->lb_pkt_len;
+ int pkt_offset, frag_len, i;
+
+ if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
+ nic_dev->lb_test_rx_idx = 0;
+ nicif_warn(nic_dev, drv, netdev, "Loopback test warning, recive too more test
pkt\n");
+ }
+
+ if (skb->len != nic_dev->lb_pkt_len) {
+ nicif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
+ nic_dev->lb_test_rx_idx++;
+ return;
+ }
+
+ pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
+ frag_len = (int)skb_headlen(skb);
+ memcpy((lb_buf + pkt_offset), skb->data, frag_len);
+ pkt_offset += frag_len;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+ frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ memcpy((lb_buf + pkt_offset), frag_data, frag_len);
+ pkt_offset += frag_len;
+ }
+ nic_dev->lb_test_rx_idx++;
}
-/**
- * rx_alloc_pkts - allocate pkts in rx queue
- * @rxq: rx queue
- *
- * Return number of skbs allocated
- **/
-static int rx_alloc_pkts(struct hinic_rxq *rxq)
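+
+/* Receive a single packet: build the skb from the ring buffers, apply the
+ * checksum/GRO/VLAN offload results carried in the CQE and hand the packet
+ * to the stack via NAPI (flushing GRO first for frag_list skbs).
+ */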
+int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_rq_wqe *rq_wqe;
- unsigned int free_wqebbs;
- struct hinic_sge sge;
- dma_addr_t dma_addr;
struct sk_buff *skb;
- u16 prod_idx;
- int i;
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u32 offload_type;
- free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+ skb = hinic_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb)) {
+ RXQ_STATS_INC(rxq, alloc_skb_err);
+ return -ENOMEM;
+ }
- /* Limit the allocation chunks */
- if (free_wqebbs > nic_dev->rx_weight)
- free_wqebbs = nic_dev->rx_weight;
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic_pull_tail(skb);
- for (i = 0; i < free_wqebbs; i++) {
- skb = rx_alloc_skb(rxq, &dma_addr);
- if (!skb) {
- netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
- goto skb_out;
- }
+ hinic_rx_csum(rxq, status, skb);
- hinic_set_sge(&sge, dma_addr, skb->len);
+ offload_type = be32_to_cpu(rx_cqe->offload_type);
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+ hinic_rx_gro(rxq, offload_type, skb);
+#endif
- rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
- &prod_idx);
- if (!rq_wqe) {
- rx_free_skb(rxq, skb, dma_addr);
- goto skb_out;
- }
+#if defined(NETIF_F_HW_VLAN_CTAG_RX)
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
+#else
+ if ((netdev->features & NETIF_F_HW_VLAN_RX) &&
+ HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
+#endif
+ u16 vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
- hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
+ /* if the packet is a vlan pkt, the vid may be 0 */
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
- hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
+ if (unlikely(test_bit(HINIC_LP_TEST, &nic_dev->flags)))
+ hinic_copy_lp_data(nic_dev, skb);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+#ifdef HAVE_NAPI_GRO_FLUSH_OLD
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+#else
+ napi_gro_flush(&rxq->irq_cfg->napi);
+#endif
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
}
-skb_out:
- if (i) {
- wmb(); /* write all the wqes before update PI */
+ return 0;
+}
- hinic_rq_update(rxq->rq, prod_idx);
- tasklet_schedule(&rxq->rx_task);
+void rx_pass_super_cqe(struct hinic_rxq *rxq, u32 index, u32 pkt_num,
+ struct hinic_rq_cqe *cqe)
+{
+ u8 sge_num = 0;
+ u32 pkt_len;
+
+ while (index < pkt_num) {
+ pkt_len = hinic_get_pkt_len_for_super_cqe
+ (cqe, index == (pkt_num - 1));
+ sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ index++;
}
-
- return i;
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
}
-/**
- * free_all_rx_skbs - free all skbs in rx queue
- * @rxq: rx queue
- **/
-static void free_all_rx_skbs(struct hinic_rxq *rxq)
+static inline int __recv_supper_cqe(struct hinic_rxq *rxq,
+ struct hinic_rq_cqe *rx_cqe, u32 pkt_info,
+ u32 vlan_len, u32 status, int *pkts,
+ u64 *rx_bytes, u32 *dropped)
{
- struct hinic_rq *rq = rxq->rq;
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sge sge;
- u16 ci;
+ u32 pkt_len;
+ int i, pkt_num = 0;
+
+ pkt_num = HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
+ i = 0;
+ while (i < pkt_num) {
+ pkt_len = ((i == (pkt_num - 1)) ?
+ RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) :
+ RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN));
+ if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len,
+ vlan_len, status))) {
+ if (i) {
+ rx_pass_super_cqe(rxq, i,
+ pkt_num,
+ rx_cqe);
+ *dropped += (pkt_num - i);
+ }
+ break;
+ }
+
+ *rx_bytes += pkt_len;
+ (*pkts)++;
+ i++;
+ }
+
+ if (!i)
+ return -EFAULT;
- while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
+ return 0;
+}
+
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_PKT_HDR_LEN(cqe) \
+ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
+ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+
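+/* NAPI poll: consume CQEs until the budget is exhausted, handling both
+ * normal and super CQEs (multiple packets per CQE) and accounting the
+ * header bytes of coalesced LRO segments; buffers are refilled once
+ * HINIC_RX_BUFFER_WRITE entries are free and back-pressure is released if
+ * required.
+ */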
+int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 status, pkt_len, vlan_len, pkt_info, dropped = 0;
+ struct hinic_rq_cqe *rx_cqe;
+ u64 rx_bytes = 0;
+ u16 sw_ci, num_lro;
+ int pkts = 0, nr_pkts = 0;
+ bool bp_en = false;
+ u16 num_wqe = 0;
+
+ while (unlikely(pkts < budget)) {
+ sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
+ rx_cqe = rxq->rx_info[sw_ci].cqe;
+ status = be32_to_cpu(rx_cqe->status);
+
+ if (!HINIC_GET_RX_DONE(status))
break;
- hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = be32_to_cpu(rx_cqe->vlan_len);
+ pkt_info = be32_to_cpu(rx_cqe->pkt_info);
+ pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
+
+ if (unlikely(HINIC_GET_SUPER_CQE_EN(pkt_info))) {
+ if (unlikely(__recv_supper_cqe(rxq, rx_cqe, pkt_info,
+ vlan_len, status, &pkts,
+ &rx_bytes, &dropped)))
+ break;
+ nr_pkts += (int)HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
+ } else {
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len,
+ vlan_len, status))
+ break;
+ rx_bytes += pkt_len;
+ pkts++;
+ nr_pkts++;
+
+ num_lro = HINIC_GET_RX_NUM_LRO(status);
+ if (num_lro) {
+ rx_bytes += ((num_lro - 1) *
+ LRO_PKT_HDR_LEN(rx_cqe));
+
+ num_wqe +=
+ (u16)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ }
+ }
+ if (unlikely(HINIC_GET_RX_BP_EN(status))) {
+ rxq->bp_cnt++;
+ bp_en = true;
+ }
- hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
+ rx_cqe->status = 0;
- rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
}
+
+ if (rxq->delta >= HINIC_RX_BUFFER_WRITE)
+ hinic_rx_fill_buffers(rxq);
+
+ if (unlikely(bp_en || test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status)))
+ hinic_unlock_bp(rxq, bp_en, pkts < budget);
+
+ u64_stats_update_begin(&rxq->rxq_stats.syncp);
+ rxq->rxq_stats.packets += nr_pkts;
+ rxq->rxq_stats.bytes += rx_bytes;
+ rxq->rxq_stats.dropped += dropped;
+ u64_stats_update_end(&rxq->rxq_stats.syncp);
+ return pkts;
}
-/**
- * rx_alloc_task - tasklet for queue allocation
- * @data: rx queue
- **/
-static void rx_alloc_task(unsigned long data)
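+
+/* Allocate one DMA-coherent array holding a CQE for every ring entry and
+ * point each rx_info at its slot.
+ */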
+static int rx_alloc_cqe(struct hinic_rxq *rxq)
{
- struct hinic_rxq *rxq = (struct hinic_rxq *)data;
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_rx_info *rx_info;
+ struct hinic_rq_cqe *cqe_va;
+ dma_addr_t cqe_pa;
+ u32 cqe_mem_size;
+ int idx;
+
+ cqe_mem_size = sizeof(*rx_info->cqe) * rxq->q_depth;
+ rxq->cqe_start_vaddr = dma_zalloc_coherent(&pdev->dev, cqe_mem_size,
+ &rxq->cqe_start_paddr,
+ GFP_KERNEL);
+ if (!rxq->cqe_start_vaddr) {
+ nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate cqe dma\n");
+ return -ENOMEM;
+ }
- (void)rx_alloc_pkts(rxq);
+ cqe_va = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
+ cqe_pa = rxq->cqe_start_paddr;
+
+ for (idx = 0; idx < rxq->q_depth; idx++) {
+ rx_info = &rxq->rx_info[idx];
+ rx_info->cqe = cqe_va;
+ rx_info->cqe_dma = cqe_pa;
+
+ cqe_va++;
+ cqe_pa += sizeof(*rx_info->cqe);
+ }
+
+ return 0;
}
-/**
- * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
- * @rxq: rx queue
- * @head_skb: the first skb in the list
- * @left_pkt_len: left size of the pkt exclude head skb
- * @ci: consumer index
- *
- * Return number of wqes that used for the left of the pkt
- **/
-static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
- unsigned int left_pkt_len, u16 ci)
+static void rx_free_cqe(struct hinic_rxq *rxq)
{
- struct sk_buff *skb, *curr_skb = head_skb;
- struct hinic_rq_wqe *rq_wqe;
- unsigned int curr_len;
- struct hinic_sge sge;
- int num_wqes = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u32 cqe_mem_size;
- while (left_pkt_len > 0) {
- rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
- &skb, &ci);
+ cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
- num_wqes++;
+ dma_free_coherent(&pdev->dev, cqe_mem_size,
+ rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
+}
+
+static int hinic_setup_rx_resources(struct hinic_rxq *rxq,
+ struct net_device *netdev,
+ struct irq_info *entry)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u64 rx_info_sz;
+ int err, pkts;
+
+ rxq->irq_id = entry->irq_id;
+ rxq->msix_entry_idx = entry->msix_entry_idx;
+ rxq->next_to_alloc = 0;
+ rxq->next_to_update = 0;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
+ if (!rx_info_sz) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size rx info\n");
+ return -EINVAL;
+ }
- hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+ rxq->rx_info = kzalloc(rx_info_sz, GFP_KERNEL);
+ if (!rxq->rx_info)
+ return -ENOMEM;
- rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ err = rx_alloc_cqe(rxq);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx cqe\n");
+ goto rx_cqe_err;
+ }
- prefetch(skb->data);
+ pkts = hinic_rx_fill_wqe(rxq);
+ if (pkts != rxq->q_depth) {
+ nicif_err(nic_dev, drv, netdev, "Failed to fill rx wqe\n");
+ err = -ENOMEM;
+ goto rx_pkts_err;
+ }
+ pkts = hinic_rx_fill_buffers(rxq);
+ if (!pkts) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx buffer\n");
+ err = -ENOMEM;
+ goto rx_pkts_err;
+ }
- curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
- left_pkt_len;
+ return 0;
- left_pkt_len -= curr_len;
+rx_pkts_err:
+ rx_free_cqe(rxq);
- __skb_put(skb, curr_len);
+rx_cqe_err:
+ kfree(rxq->rx_info);
+
+ return err;
+}
- if (curr_skb == head_skb)
- skb_shinfo(head_skb)->frag_list = skb;
- else
- curr_skb->next = skb;
+static void hinic_free_rx_resources(struct hinic_rxq *rxq)
+{
+ hinic_rx_free_buffers(rxq);
+ rx_free_cqe(rxq);
+ kfree(rxq->rx_info);
+}
- head_skb->len += skb->len;
- head_skb->data_len += skb->len;
- head_skb->truesize += skb->truesize;
+int hinic_setup_all_rx_resources(struct net_device *netdev,
+ struct irq_info *msix_entires)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, q_id;
+ int err;
- curr_skb = skb;
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ err = hinic_setup_rx_resources(&nic_dev->rxqs[q_id],
+ nic_dev->netdev,
+ &msix_entires[q_id]);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set up rxq resource\n");
+ goto init_rxq_err;
+ }
}
- return num_wqes;
+ return 0;
+
+init_rxq_err:
+ for (i = 0; i < q_id; i++)
+ hinic_free_rx_resources(&nic_dev->rxqs[i]);
+
+ return err;
}
-/**
- * rxq_recv - Rx handler
- * @rxq: rx queue
- * @budget: maximum pkts to process
- *
- * Return number of pkts received
- **/
-static int rxq_recv(struct hinic_rxq *rxq, int budget)
+void hinic_free_all_rx_resources(struct net_device *netdev)
{
- struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
- u64 pkt_len = 0, rx_bytes = 0;
- struct hinic_rq_wqe *rq_wqe;
- int num_wqes, pkts = 0;
- struct hinic_sge sge;
- struct sk_buff *skb;
- u16 ci;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id;
- while (pkts < budget) {
- num_wqes = 0;
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++)
+ hinic_free_rx_resources(&nic_dev->rxqs[q_id]);
+}
- rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
- &ci);
- if (!rq_wqe)
- break;
+int hinic_alloc_rxqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_rxq *rxq;
+ u16 num_rxqs = nic_dev->max_qps;
+ u16 q_id;
+ u64 rxq_size;
+
+ rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
+ if (!rxq_size) {
+ nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n");
+ return -EINVAL;
+ }
- hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+ nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL);
+ if (!nic_dev->rxqs) {
+ nic_err(&pdev->dev, "Failed to allocate rxqs\n");
+ return -ENOMEM;
+ }
- rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buff_len;
+ rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len);
+ rxq->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE *
+ nic_dev->rx_buff_len;
+ rxq->q_depth = nic_dev->rq_depth;
+ rxq->q_mask = nic_dev->rq_depth - 1;
+
+ rxq_stats_init(rxq);
+ }
- prefetch(skb->data);
+ return 0;
+}
- pkt_len = sge.len;
+void hinic_free_rxqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
- if (pkt_len <= HINIC_RX_BUF_SZ) {
- __skb_put(skb, pkt_len);
- } else {
- __skb_put(skb, HINIC_RX_BUF_SZ);
- num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
- HINIC_RX_BUF_SZ, ci);
- }
+ hinic_clear_rss_config_user(nic_dev);
+ kfree(nic_dev->rxqs);
+}
+
+void hinic_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
+
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
- hinic_rq_put_wqe(rxq->rq, ci,
- (num_wqes + 1) * HINIC_RQ_WQE_SIZE);
+void hinic_set_default_rss_indir(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (!nic_dev->rss_indir_user)
+ return;
+
+ nicif_info(nic_dev, drv, netdev,
+ "Discard user configured Rx flow hash indirection\n");
+
+ kfree(nic_dev->rss_indir_user);
+ nic_dev->rss_indir_user = NULL;
+}
- skb_record_rx_queue(skb, qp->q_id);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
+static void hinic_maybe_reconfig_rss_indir(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int i;
+
+ if (!nic_dev->rss_indir_user)
+ return;
- napi_gro_receive(&rxq->napi, skb);
+	/* TODO: when DCB is enabled, the user cannot configure the RSS indirection table */
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ goto discard_user_rss_indir;
- pkts++;
- rx_bytes += pkt_len;
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ if (nic_dev->rss_indir_user[i] >= nic_dev->num_qps)
+ goto discard_user_rss_indir;
}
- if (pkts)
- tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */
+ return;
- u64_stats_update_begin(&rxq->rxq_stats.syncp);
- rxq->rxq_stats.pkts += pkts;
- rxq->rxq_stats.bytes += rx_bytes;
- u64_stats_update_end(&rxq->rxq_stats.syncp);
+discard_user_rss_indir:
+ hinic_set_default_rss_indir(netdev);
+}
- return pkts;
+static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev)
+{
+ kfree(nic_dev->rss_hkey_user);
+
+ nic_dev->rss_hkey_user_be = NULL;
+ nic_dev->rss_hkey_user = NULL;
+
+ kfree(nic_dev->rss_indir_user);
+ nic_dev->rss_indir_user = NULL;
}
-static int rx_poll(struct napi_struct *napi, int budget)
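+
+/* Fill the RSS indirection table: the table is split into one group per TC
+ * (a single group when no TCs are used) and each group round-robins over
+ * its own block of num_rss queue indexes.
+ */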
+static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev,
+ u8 num_tcs, u32 *indir)
{
- struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
- struct hinic_rq *rq = rxq->rq;
- int pkts;
+ u16 num_rss, tc_group_size;
+ int i;
- pkts = rxq_recv(rxq, budget);
- if (pkts >= budget)
- return budget;
+ if (num_tcs)
+ tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs;
+ else
+ tc_group_size = HINIC_RSS_INDIR_SIZE;
- napi_complete(napi);
- enable_irq(rq->irq);
- return pkts;
+ num_rss = nic_dev->num_rss;
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir[i] = (i / tc_group_size) * num_rss + i % num_rss;
+}
+
+static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
+{
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+
+ hinic_rss_cfg(nic_dev->hwdev, 0, nic_dev->rss_tmpl_idx, 0, prio_tc);
}
-static void rx_add_napi(struct hinic_rxq *rxq)
+/* In rx, iq means cos */
+static u8 hinic_get_iqmap_by_tc(u8 *prio_tc, u8 num_iq, u8 tc)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u8 i, map = 0;
- netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
- napi_enable(&rxq->napi);
+ for (i = 0; i < num_iq; i++) {
+ if (prio_tc[i] == tc)
+ map |= (u8)(1U << ((num_iq - 1) - i));
+ }
+
+ return map;
}
-static void rx_del_napi(struct hinic_rxq *rxq)
+static u8 hinic_get_tcid_by_rq(u32 *indir_tbl, u8 num_tcs, u16 rq_id)
{
- napi_disable(&rxq->napi);
- netif_napi_del(&rxq->napi);
+ u16 tc_group_size;
+ int i;
+
+ tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs;
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ if (indir_tbl[i] == rq_id)
+ return (u8)(i / tc_group_size);
+ }
+ return 0xFF; /* Invalid TC */
}
-static irqreturn_t rx_irq(int irq, void *data)
+#define HINIC_NUM_IQ_PER_FUNC 8
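+
+/* For every RQ, derive its TC from the indirection table and turn the
+ * prio_tc mapping into a bitmap of the ingress queues (cos) that may feed
+ * this RQ.
+ */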
+static void hinic_get_rq2iq_map(struct hinic_nic_dev *nic_dev,
+ u16 num_rq, u8 num_tcs, u8 *prio_tc,
+ u32 *indir_tbl, u8 *map)
{
- struct hinic_rxq *rxq = (struct hinic_rxq *)data;
- struct hinic_rq *rq = rxq->rq;
- struct hinic_dev *nic_dev;
+ u16 qid;
+ u8 tc_id;
- /* Disable the interrupt until napi will be completed */
- disable_irq_nosync(rq->irq);
+ if (!num_tcs)
+ num_tcs = 1;
- nic_dev = netdev_priv(rxq->netdev);
- hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
+ for (qid = 0; qid < num_rq; qid++) {
+ tc_id = hinic_get_tcid_by_rq(indir_tbl, num_tcs, qid);
+ map[qid] = hinic_get_iqmap_by_tc(prio_tc,
+ HINIC_NUM_IQ_PER_FUNC, tc_id);
+ }
- napi_schedule(&rxq->napi);
- return IRQ_HANDLED;
}
-static int rx_request_irq(struct hinic_rxq *rxq)
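+
+/* Program the hardware RSS state: hash key (user supplied or the built-in
+ * default key), indirection table, hash type and engine, and finally
+ * enable RSS with the given TC priority mapping.
+ */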
+int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
+ u8 *prio_tc)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_rq *rq = rxq->rq;
- struct hinic_qp *qp;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 tmpl_idx = 0xFF;
+ u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
+ u32 *indir_tbl;
+ u8 *hkey;
int err;
- rx_add_napi(rxq);
+ tmpl_idx = nic_dev->rss_tmpl_idx;
- hinic_hwdev_msix_set(hwdev, rq->msix_entry,
- RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
- RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
- RX_IRQ_NO_RESEND_TIMER);
-
- err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
- if (err) {
- rx_del_napi(rxq);
+ /* RSS key */
+ if (nic_dev->rss_hkey_user)
+ hkey = nic_dev->rss_hkey_user;
+ else
+ hkey = default_rss_key;
+ err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hkey);
+ if (err)
return err;
+
+ hinic_maybe_reconfig_rss_indir(netdev);
+ indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
+ if (!indir_tbl) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate set hw rss
indir_tbl\n");
+ return -ENOMEM;
}
- qp = container_of(rq, struct hinic_qp, rq);
- cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->mask);
- return irq_set_affinity_hint(rq->irq, &rq->mask);
-}
+ if (nic_dev->rss_indir_user)
+ memcpy(indir_tbl, nic_dev->rss_indir_user,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+ else
+ hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
-static void rx_free_irq(struct hinic_rxq *rxq)
-{
- struct hinic_rq *rq = rxq->rq;
+
+ err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indir_tbl);
+ if (err)
+ goto out;
+
+ err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, nic_dev->rss_type);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
+ nic_dev->rss_hash_engine);
+ if (err)
+ goto out;
- irq_set_affinity_hint(rq->irq, NULL);
- free_irq(rq->irq, rxq);
- rx_del_napi(rxq);
+ err = hinic_rss_cfg(nic_dev->hwdev, rss_en, tmpl_idx, num_tc, prio_tc);
+ if (err)
+ goto out;
+
+ kfree(indir_tbl);
+ return 0;
+
+out:
+ kfree(indir_tbl);
+ return err;
}
-/**
- * hinic_init_rxq - Initialize the Rx Queue
- * @rxq: Logical Rx Queue
- * @rq: Hardware Rx Queue to connect the Logical queue with
- * @netdev: network device to connect the Logical queue with
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
- struct net_device *netdev)
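+
+/* Derive the prio_tc mapping from the DCB configuration, program the RSS
+ * parameters into hardware and compute the resulting RQ-to-IQ map.
+ */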
+static int hinic_rss_init(struct hinic_nic_dev *nic_dev, u8 *rq2iq_map)
{
- struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
- int err, pkts, irqname_len;
-
- rxq->netdev = netdev;
- rxq->rq = rq;
+ struct net_device *netdev = nic_dev->netdev;
+ u32 *indir_tbl;
+ u8 cos, num_tc = 0;
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ int err;
- rxq_stats_init(rxq);
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ num_tc = nic_dev->max_cos;
+ for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
+ if (cos < HINIC_DCB_COS_MAX - nic_dev->max_cos)
+ prio_tc[cos] = nic_dev->max_cos - 1;
+ else
+ prio_tc[cos] = (HINIC_DCB_COS_MAX - 1) - cos;
+ }
+ } else {
+ num_tc = 0;
+ }
- irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1;
- rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
- if (!rxq->irq_name)
+ indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
+ if (!indir_tbl) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate rss init indir_tbl\n");
return -ENOMEM;
+ }
- sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);
+ if (nic_dev->rss_indir_user)
+ memcpy(indir_tbl, nic_dev->rss_indir_user,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+ else
+ hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
+ err = hinic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc);
+ if (err) {
+ kfree(indir_tbl);
+ return err;
+ }
- tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq);
+ hinic_get_rq2iq_map(nic_dev, nic_dev->num_qps, num_tc,
+ prio_tc, indir_tbl, rq2iq_map);
- pkts = rx_alloc_pkts(rxq);
- if (!pkts) {
- err = -ENOMEM;
- goto err_rx_pkts;
+ kfree(indir_tbl);
+ return 0;
+}
+
+int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+
+	/* RSS must be enabled when DCB is enabled */
+ return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
+}
+
+int hinic_rx_configure(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 rq2iq_map[HINIC_MAX_NUM_RQ];
+ int err;
+
+	/* By default, map every RQ to all IQs */
+ memset(rq2iq_map, 0xFF, sizeof(rq2iq_map));
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic_rss_init(nic_dev, rq2iq_map);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+ return -EFAULT;
+ }
}
- err = rx_request_irq(rxq);
+ err = hinic_dcb_set_rq_iq_mapping(nic_dev->hwdev,
+ hinic_func_max_qnum(nic_dev->hwdev),
+ rq2iq_map);
if (err) {
- netdev_err(netdev, "Failed to request Rx irq\n");
- goto err_req_rx_irq;
+ nicif_err(nic_dev, drv, netdev, "Failed to set rq_iq mapping\n");
+ goto set_rq_cos_mapping_err;
}
return 0;
-err_req_rx_irq:
-err_rx_pkts:
- tasklet_kill(&rxq->rx_task);
- free_all_rx_skbs(rxq);
- devm_kfree(&netdev->dev, rxq->irq_name);
+set_rq_cos_mapping_err:
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_deinit(nic_dev);
+
return err;
}
-/**
- * hinic_clean_rxq - Clean the Rx Queue
- * @rxq: Logical Rx Queue
- **/
-void hinic_clean_rxq(struct hinic_rxq *rxq)
+void hinic_rx_remove_configure(struct net_device *netdev)
{
- struct net_device *netdev = rxq->netdev;
-
- rx_free_irq(rxq);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
- tasklet_kill(&rxq->rx_task);
- free_all_rx_skbs(rxq);
- devm_kfree(&netdev->dev, rxq->irq_name);
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_deinit(nic_dev);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 27c9af4b1c12..f496516638ec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -16,40 +16,118 @@
#ifndef HINIC_RX_H
#define HINIC_RX_H
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/interrupt.h>
+/* rx cqe checksum err */
+#define HINIC_RX_CSUM_IP_CSUM_ERR BIT(0)
+#define HINIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
+#define HINIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
+#define HINIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
+#define HINIC_RX_CSUM_ICMPv4_CSUM_ERR BIT(4)
+#define HINIC_RX_CSUM_ICMPv6_CSUM_ERR BIT(5)
+#define HINIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
+#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
-#include "hinic_hw_qp.h"
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+
+#define HINIC_RX_BP_LOWER_THD 200
+#define HINIC_RX_BP_UPPER_THD 400
+
+#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16
+
+enum {
+ HINIC_RX_STATUS_BP_EN,
+};
struct hinic_rxq_stats {
- u64 pkts;
- u64 bytes;
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 unlock_bp;
+ u64 dropped;
- struct u64_stats_sync syncp;
+ u64 alloc_skb_err;
+#ifdef HAVE_NDO_GET_STATS64
+ struct u64_stats_sync syncp;
+#else
+ struct u64_stats_sync_empty syncp;
+#endif
+};
+
+struct hinic_rx_info {
+ dma_addr_t buf_dma_addr;
+
+ struct hinic_rq_cqe *cqe;
+ dma_addr_t cqe_dma;
+ struct page *page;
+ u32 page_offset;
+ struct hinic_rq_wqe *rq_wqe;
};
struct hinic_rxq {
- struct net_device *netdev;
- struct hinic_rq *rq;
+ struct net_device *netdev;
+
+ u16 q_id;
+ u16 q_depth;
+ u16 q_mask;
+
+ u16 buf_len;
+ u32 rx_buff_shift;
+ u32 dma_rx_buff_size;
+
+ struct hinic_rxq_stats rxq_stats;
+ int cons_idx;
+ int delta;
- struct hinic_rxq_stats rxq_stats;
+ u32 irq_id;
+ u16 msix_entry_idx;
- char *irq_name;
+ struct hinic_rx_info *rx_info;
- struct tasklet_struct rx_task;
+ struct hinic_irq *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ u32 bp_cnt;
+ unsigned long status;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ u64 last_moder_packets;
+ u64 last_moder_bytes;
+ u8 last_coalesc_timer_cfg;
+ u8 last_pending_limt;
- struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
+void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats);
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq,
+ struct hinic_rxq_stats *stats);
+
+int hinic_alloc_rxqs(struct net_device *netdev);
+
+void hinic_free_rxqs(struct net_device *netdev);
+
+void hinic_init_rss_parameters(struct net_device *netdev);
+
+void hinic_set_default_rss_indir(struct net_device *netdev);
+
+int hinic_setup_all_rx_resources(struct net_device *netdev,
+ struct irq_info *msix_entires);
+
+void hinic_free_all_rx_resources(struct net_device *netdev);
+
+void hinic_rx_remove_configure(struct net_device *netdev);
+
+int hinic_rx_configure(struct net_device *netdev);
-void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
+int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
+ u8 *prio_tc);
-int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
- struct net_device *netdev);
+int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc);
-void hinic_clean_rxq(struct hinic_rxq *rxq);
+int hinic_rx_poll(struct hinic_rxq *rxq, int budget);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
new file mode 100644
index 000000000000..e044b4115dfd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_sriov.h"
+#include "hinic_lld.h"
+
+#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE))
+ssize_t hinic_sriov_totalvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%d\n", pci_sriov_get_totalvfs(pdev));
+}
+
+ssize_t hinic_sriov_numvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%d\n", pci_num_vf(pdev));
+}
+
+/*lint -save -e713*/
+ssize_t hinic_sriov_numvfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ u16 num_vfs;
+ int cur_vfs, total_vfs;
+
+ ret = kstrtou16(buf, 0, &num_vfs);
+ if (ret < 0)
+ return ret;
+
+ cur_vfs = pci_num_vf(pdev);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ if (num_vfs > total_vfs)
+ return -ERANGE;
+
+ if (num_vfs == cur_vfs)
+ return count; /* no change */
+
+ if (num_vfs == 0) {
+ /* disable VFs */
+ ret = hinic_pci_sriov_configure(pdev, 0);
+ if (ret < 0)
+ return ret;
+ return count;
+ }
+
+ /* enable VFs */
+ if (cur_vfs) {
+		nic_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+			 cur_vfs, num_vfs);
+ return -EBUSY;
+ }
+
+ ret = hinic_pci_sriov_configure(pdev, num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (ret != num_vfs)
+ nic_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+ num_vfs, ret);
+
+ return count;
+}
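+
+/* Usage sketch (illustrative; the BDF below is an example):
+ *   echo 4 > /sys/bus/pci/devices/0000:05:00.0/sriov_numvfs
+ *   echo 0 > /sys/bus/pci/devices/0000:05:00.0/sriov_numvfs
+ * Requesting a different non-zero count while VFs are already enabled
+ * fails with -EBUSY above; disable first, then enable the new count.
+ */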
+
+/*lint -restore*/
+#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */
+
+int hinic_pci_sriov_disable(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+ /* if SR-IOV is already disabled then there is nothing to do */
+ if (!sriov_info->sriov_enabled)
+ return 0;
+
+ if (test_and_set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) {
+ nic_err(&sriov_info->pdev->dev,
+			"SR-IOV disable in progress, please wait\n");
+ return -EPERM;
+ }
+
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+		nic_warn(&sriov_info->pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+ sriov_info->sriov_enabled = false;
+
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
+
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+#endif
+
+ return 0;
+}
+
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct hinic_sriov_info *sriov_info;
+ int err = 0;
+ int pre_existing_vfs = 0;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+ nic_err(&sriov_info->pdev->dev,
+			"SR-IOV enable in progress, please wait, num_vfs %d\n",
+ num_vfs);
+ return -EPERM;
+ }
+
+ pre_existing_vfs = pci_num_vf(sriov_info->pdev);
+
+ if (num_vfs > pci_sriov_get_totalvfs(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return -ERANGE;
+ }
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs) {
+ err = hinic_pci_sriov_disable(sriov_info->pdev);
+ if (err) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+ } else if (pre_existing_vfs == num_vfs) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return num_vfs;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+ nic_err(&sriov_info->pdev->dev,
+			"Failed to init VF in hardware before enabling SR-IOV, error %d\n",
+ err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ nic_err(&sriov_info->pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+#else
+
+ return 0;
+#endif
+}
+
+static bool hinic_is_support_sriov_configure(struct pci_dev *pdev)
+{
+ enum hinic_init_state state = hinic_get_init_state(pdev);
+ struct hinic_sriov_info *sriov_info;
+
+ if (state < HINIC_INIT_STATE_NIC_INITED) {
+		nic_err(&pdev->dev, "NIC device not initialized, SR-IOV configuration is not supported\n");
+ return false;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ if (FUNC_SRIOV_FIX_NUM_VF(sriov_info->hwdev)) {
+		nic_err(&pdev->dev, "Changing the SR-IOV configuration is not supported\n");
+ return false;
+ }
+
+ return true;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+
+ if (!hinic_is_support_sriov_configure(dev))
+ return -EFAULT;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EFAULT;
+
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
+}
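+
+/* Note: this is the handler the driver presumably wires into its
+ * struct pci_driver as .sriov_configure (in hinic_lld.c), so the PCI
+ * core can service writes to the standard sriov_numvfs sysfs attribute
+ * on kernels that support it.
+ */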
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting VF MAC\n");
+ return -EOPNOTSUPP;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (!is_valid_ether_addr(mac) || /*lint !e574*/
+ vf >= sriov_info->num_vfs) /*lint !e574*/
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err)
+ return err;
+
+	nic_info(&sriov_info->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf);
+	nic_info(&sriov_info->pdev->dev, "Reload the VF driver to make this change effective.");
+
+ return 0;
+}
+
+/*lint -save -e574 -e734*/
+#ifdef IFLA_VF_MAX
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+#else
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+#endif
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
+ int err = 0;
+
+ if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting VF VLAN\n");
+ return -EOPNOTSUPP;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+#ifdef IFLA_VF_VLAN_INFO_MAX
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+#endif
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
+ return 0;
+
+ if (vlan || qos) {
+ if (cur_vlanprio)
+ err = hinic_kill_vf_vlan(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err)
+ goto out;
+ err = hinic_add_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf),
+ vlan, qos);
+ } else {
+ err = hinic_kill_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf));
+ }
+
+out:
+ return err;
+}
+#endif
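+
+/* Usage sketch (illustrative): the ndo hooks above back commands such as
+ *   ip link set eth0 vf 0 mac 00:11:22:33:44:55
+ *   ip link set eth0 vf 0 vlan 100 qos 3
+ * Setting vlan 0 qos 0 clears the VF VLAN filter via hinic_kill_vf_vlan().
+ */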
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
+
+ return 0;
+}
+
+/**
+ * hinic_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ const char *vf_link[] = {"auto", "enable", "disable"};
+ int err;
+
+ if (FUNC_FORCE_LINK_UP(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting VF link state\n");
+ return -EOPNOTSUPP;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ /* validate the request */
+ if (vf_id >= sriov_info->num_vfs) {
+ nicif_err(adapter, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+
+ if (!err)
+ nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n",
+ vf_id, vf_link[link]);
+
+ return err;
+}
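+
+/* Usage sketch (illustrative):
+ *   ip link set eth0 vf 0 state auto|enable|disable
+ * The link argument indexes the vf_link[] names logged above.
+ */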
+
+#define HINIC_TX_RATE_TABLE_FULL 12
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+#else
+int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate)
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct nic_port_info port_info = {0};
+ struct hinic_sriov_info *sriov_info;
+#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ int min_tx_rate = 0;
+#endif
+ u8 link_status = 0;
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ int err = 0;
+
+ if (!FUNC_SUPPORT_RATE_LIMIT(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+			  "Current function doesn't support setting VF rate limit\n");
+ return -EOPNOTSUPP;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+
+ /* verify VF is active */
+ if (vf >= sriov_info->num_vfs) {
+ nicif_err(adapter, drv, netdev, "VF number must be less than %d\n",
+ sriov_info->num_vfs);
+ return -EINVAL;
+ }
+
+ if (max_tx_rate < min_tx_rate) {
+		nicif_err(adapter, drv, netdev, "Invalid rate, max rate %d must be greater than min rate %d\n",
+			  max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
+
+ err = hinic_get_link_state(adapter->hwdev, &link_status);
+ if (err) {
+ nicif_err(adapter, drv, netdev,
+			  "Failed to get link status when setting VF tx rate\n");
+ return -EIO;
+ }
+
+ if (!link_status) {
+ nicif_err(adapter, drv, netdev,
+			  "Link status must be up when setting VF tx rate\n");
+ return -EINVAL;
+ }
+
+ err = hinic_get_port_info(adapter->hwdev, &port_info);
+ if (err || port_info.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+ /* rate limit cannot be less than 0 and greater than link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) {
+ nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %d]\n",
+ speeds[port_info.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ nicif_err(adapter, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ nicif_info(adapter, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+#else
+ nicif_info(adapter, drv, netdev,
+ "Set VF %d tx rate %d successfully\n",
+ vf, max_tx_rate);
+#endif
+
+ return 0;
+} /*lint -restore*/
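+
+/* Usage sketch (illustrative, assuming a 10G port so that
+ * speeds[port_info.speed] is 10000):
+ *   ip link set eth0 vf 0 max_tx_rate 5000
+ * caps VF 0 at 5000 Mbps; rates above the current link speed fail the
+ * range check above with -EINVAL.
+ */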
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
new file mode 100644
index 000000000000..7e8ff3a52dc0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_SRIOV_H
+#define HINIC_SRIOV_H
+
+#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE))
+ssize_t hinic_sriov_totalvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hinic_sriov_numvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hinic_sriov_numvfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */
+
+enum hinic_sriov_state {
+ HINIC_SRIOV_DISABLE,
+ HINIC_SRIOV_ENABLE,
+ HINIC_FUNC_REMOVE,
+};
+
+struct hinic_sriov_info {
+ struct pci_dev *pdev;
+ void *hwdev;
+ bool sriov_enabled;
+ unsigned int num_vfs;
+ unsigned long state;
+};
+
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#ifdef IFLA_VF_MAX
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+#else
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+#endif
+
+int hinic_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+#else
+int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+#endif
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5fca0356c9c..9c0b13c64a65 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -13,65 +13,47 @@
*
*/
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
#include <linux/netdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/skbuff.h>
-#include <linux/smp.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
-#include "hinic_dev.h"
-#include "hinic_tx.h"
-
-#define TX_IRQ_NO_PENDING 0
-#define TX_IRQ_NO_COALESC 0
-#define TX_IRQ_NO_LLI_TIMER 0
-#define TX_IRQ_NO_CREDIT 0
-#define TX_IRQ_NO_RESEND_TIMER 0
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
-#define CI_UPDATE_NO_PENDING 0
-#define CI_UPDATE_NO_COALESC 0
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_dev.h"
+#include "hinic_qp.h"
+#include "hinic_tx.h"
-#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
+#define MIN_SKB_LEN 17
+#define MAX_PAYLOAD_OFFSET 221
-#define MIN_SKB_LEN 64
+#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
-/**
- * hinic_txq_clean_stats - Clean the statistics of specific queue
- * @txq: Logical Tx Queue
- **/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
-{
- struct hinic_txq_stats *txq_stats = &txq->txq_stats;
-
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->pkts = 0;
- txq_stats->bytes = 0;
- txq_stats->tx_busy = 0;
- txq_stats->tx_wake = 0;
- txq_stats->tx_dropped = 0;
- u64_stats_update_end(&txq_stats->syncp);
+#define TXQ_STATS_INC(txq, field) \
+{ \
+ u64_stats_update_begin(&(txq)->txq_stats.syncp); \
+ (txq)->txq_stats.field++; \
+ u64_stats_update_end(&(txq)->txq_stats.syncp); \
}
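+
+/* TXQ_STATS_INC(txq, busy) and friends give lockless per-queue counters:
+ * the u64_stats_update_begin/end pair versions the 64-bit counters via a
+ * seqcount on 32-bit kernels and compiles away on 64-bit ones.
+ */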
-/**
- * hinic_txq_get_stats - get statistics of Tx Queue
- * @txq: Logical Tx Queue
- * @stats: return updated stats here
- **/
-void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
+void hinic_txq_get_stats(struct hinic_txq *txq,
+ struct hinic_txq_stats *stats)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
@@ -79,451 +61,1223 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
- stats->pkts = txq_stats->pkts;
- stats->bytes = txq_stats->bytes;
- stats->tx_busy = txq_stats->tx_busy;
- stats->tx_wake = txq_stats->tx_wake;
- stats->tx_dropped = txq_stats->tx_dropped;
+ stats->bytes = txq_stats->bytes;
+ stats->packets = txq_stats->packets;
+ stats->busy = txq_stats->busy;
+ stats->wake = txq_stats->wake;
+ stats->dropped = txq_stats->dropped;
+ stats->big_frags_pkts = txq_stats->big_frags_pkts;
+ stats->big_udp_pkts = txq_stats->big_udp_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
-/**
- * txq_stats_init - Initialize the statistics of specific queue
- * @txq: Logical Tx Queue
- **/
+void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats)
+{
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->bytes = 0;
+ txq_stats->packets = 0;
+ txq_stats->busy = 0;
+ txq_stats->wake = 0;
+ txq_stats->dropped = 0;
+ txq_stats->big_frags_pkts = 0;
+ txq_stats->big_udp_pkts = 0;
+
+ txq_stats->ufo_pkt_unsupport = 0;
+ txq_stats->ufo_linearize_err = 0;
+ txq_stats->ufo_alloc_skb_err = 0;
+ txq_stats->skb_pad_err = 0;
+ txq_stats->frag_len_overflow = 0;
+ txq_stats->offload_cow_skb_err = 0;
+ txq_stats->alloc_cpy_frag_err = 0;
+ txq_stats->map_cpy_frag_err = 0;
+ txq_stats->map_frag_err = 0;
+ u64_stats_update_end(&txq_stats->syncp);
+}
+
static void txq_stats_init(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
u64_stats_init(&txq_stats->syncp);
- hinic_txq_clean_stats(txq);
+ hinic_txq_clean_stats(txq_stats);
}
-/**
- * tx_map_skb - dma mapping for skb and return sges
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: returned sges
- *
- * Return 0 - Success, negative - Failure
- **/
-static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+inline void hinic_set_buf_desc(struct hinic_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = cpu_to_be32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_be32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_be32(len);
+}
+
+static int tx_map_skb(struct hinic_nic_dev *nic_dev, struct sk_buff *skb,
+ struct hinic_txq *txq, struct hinic_tx_info *tx_info,
+ struct hinic_sq_bufdesc *buf_descs)
{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
- dma_addr_t dma_addr;
- int i, j;
-
- dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, dma_addr)) {
- dev_err(&pdev->dev, "Failed to map Tx skb data\n");
- return -EFAULT;
- }
-
- hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
-
- for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, dma_addr)) {
- dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
- goto err_tx_map;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_dma_len *dma_len = tx_info->dma_len;
+ struct skb_frag_struct *frag = NULL;
+ u16 base_nr_frags;
+ int j, i = 0;
+ int node, err = 0;
+ u32 nsize, cpy_nsize = 0;
+ u8 *vaddr, *cpy_buff = NULL;
+ u16 skb_nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)) {
+ for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++)
+ cpy_nsize +=
+ skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ if (!cpy_nsize) {
+ TXQ_STATS_INC(txq, alloc_cpy_frag_err);
+ return -EINVAL;
+ }
+
+ node = dev_to_node(&nic_dev->pdev->dev);
+ if (node == NUMA_NO_NODE)
+ cpy_buff = kzalloc(cpy_nsize,
+ GFP_ATOMIC | __GFP_NOWARN);
+ else
+ cpy_buff = kzalloc_node(cpy_nsize,
+ GFP_ATOMIC | __GFP_NOWARN,
+ node);
+
+ if (!cpy_buff) {
+ TXQ_STATS_INC(txq, alloc_cpy_frag_err);
+ return -ENOMEM;
+ }
+
+ tx_info->cpy_buff = cpy_buff;
+
+ for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ nsize = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+
+ vaddr = _kc_kmap_atomic(skb_frag_page(frag));
+ memcpy(cpy_buff, vaddr + frag->page_offset, nsize);
+ _kc_kunmap_atomic(vaddr);
+ cpy_buff += nsize;
}
+ }
- hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
+ dma_len[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_len[0].dma)) {
+ TXQ_STATS_INC(txq, map_frag_err);
+ err = -EFAULT;
+ goto map_single_err;
}
+ dma_len[0].len = skb_headlen(skb);
+ hinic_set_buf_desc(&buf_descs[0], dma_len[0].dma,
+ dma_len[0].len);
+
+ if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)
+ base_nr_frags = HINIC_MAX_SKB_NR_FRAGE - 1;
+ else
+ base_nr_frags = skb_nr_frags;
+
+ for (i = 0; i < base_nr_frags; ) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ i++;
+ dma_len[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_len[i].dma)) {
+ TXQ_STATS_INC(txq, map_frag_err);
+ i--;
+ err = -EFAULT;
+ goto frag_map_err;
+ }
+ dma_len[i].len = skb_frag_size(frag);
- return 0;
+ hinic_set_buf_desc(&buf_descs[i], dma_len[i].dma,
+ dma_len[i].len);
+ }
-err_tx_map:
- for (j = 0; j < i; j++)
- dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
- sges[j + 1].len, DMA_TO_DEVICE);
+ if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) {
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma =
+ dma_map_single(&pdev->dev, tx_info->cpy_buff,
+ cpy_nsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma)) {
+ TXQ_STATS_INC(txq, map_cpy_frag_err);
+ err = -EFAULT;
+ goto fusion_map_err;
+ }
- dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].len = cpy_nsize;
+ hinic_set_buf_desc(&buf_descs[HINIC_MAX_SKB_NR_FRAGE],
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].len);
+ }
+
+ return 0;
+
+fusion_map_err:
+frag_map_err:
+ for (j = 0; j < i;) {
+ j++;
+ dma_unmap_page(&pdev->dev, dma_len[j].dma,
+ dma_len[j].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_len[0].dma, dma_len[0].len,
DMA_TO_DEVICE);
- return -EFAULT;
+
+map_single_err:
+ kfree(tx_info->cpy_buff);
+ tx_info->cpy_buff = NULL;
+
+ return err;
}
-/**
- * tx_unmap_skb - unmap the dma address of the skb
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: the sges that are connected to the skb
- **/
-static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+static inline void tx_unmap_skb(struct hinic_nic_dev *nic_dev,
+ struct sk_buff *skb,
+ struct hinic_dma_len *dma_len)
{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
+ struct pci_dev *pdev = nic_dev->pdev;
+ u16 nr_frags;
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
- dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
- sges[i + 1].len, DMA_TO_DEVICE);
+ if (skb_shinfo(skb)->nr_frags > HINIC_MAX_SKB_NR_FRAGE)
+ nr_frags = HINIC_MAX_SKB_NR_FRAGE;
+ else
+ nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
- DMA_TO_DEVICE);
+ for (i = 0; i < nr_frags; ) {
+ i++;
+ dma_unmap_page(&pdev->dev,
+ dma_len[i].dma,
+ dma_len[i].len, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, dma_len[0].dma,
+ dma_len[0].len, DMA_TO_DEVICE);
}
-netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+union hinic_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_ip *ip,
+ union hinic_l4 *l4,
+ enum tx_offload_type offload_type,
+ enum sq_l3_type *l3_type, u8 *l4_proto)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct netdev_queue *netdev_txq;
- int nr_sges, err = NETDEV_TX_OK;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- struct hinic_txq *txq;
- struct hinic_qp *qp;
- u16 prod_idx;
+ unsigned char *exthdr;
+
+ if (ip->v4->version == 4) {
+ *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+ IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ *l4_proto = ip->v4->protocol;
+
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+ /* inner_transport_header is wrong in centos7.0 and suse12.1 */
+ l4->hdr = ip->hdr + ((u8)ip->v4->ihl << 2);
+#endif
+ } else if (ip->v4->version == 6) {
+ *l3_type = IPV6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ __be16 frag_off = 0;
+#ifndef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+ ipv6_skip_exthdr(skb, (int)(exthdr - skb->data),
+ l4_proto, &frag_off);
+#else
+ int pld_off = 0;
+
+ pld_off = ipv6_skip_exthdr(skb,
+ (int)(exthdr -
+ skb->data),
+ l4_proto,
+ &frag_off);
+ l4->hdr = skb->data + pld_off;
+ } else {
+ l4->hdr = exthdr;
+#endif
+ }
+ } else {
+ *l3_type = UNKNOWN_L3TYPE;
+ *l4_proto = 0;
+ }
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+ enum tx_offload_type offload_type, u8 l4_proto,
+ enum sq_l4offload_type *l4_offload,
+ u32 *l4_len, u32 *offset)
+{
+ *offset = 0;
+ *l4_len = 0;
+ *l4_offload = OFFLOAD_DISABLE;
+
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = TCP_OFFLOAD_ENABLE;
+ *l4_len = l4->tcp->doff * 4; /* doff in unit of 4B */
+		/* To keep the same behavior as TSO, payload offset begins from the payload */
+ *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = UDP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct udphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_SCTP:
+ /* only csum offload support sctp */
+ if (offload_type != TX_OFFLOAD_CSUM)
+ break;
- txq = &nic_dev->txqs[skb->queue_mapping];
- qp = container_of(txq->sq, struct hinic_qp, sq);
+ *l4_offload = SCTP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct sctphdr);
+		/* To keep the same behavior as UFO, payload offset
+		 * begins from the L4 header
+		 */
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
- if (skb->len < MIN_SKB_LEN) {
- if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
- netdev_err(netdev, "Failed to pad skb\n");
- goto update_error_stats;
+ default:
+ break;
+ }
+}
+
+static int hinic_tx_csum(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ union hinic_ip ip;
+ union hinic_l4 l4;
+ enum sq_l3_type l3_type;
+ enum sq_l4offload_type l4_offload;
+ u32 network_hdr_len;
+ u32 offset, l4_len;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+ u32 tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+ ip.hdr = skb_network_header(skb);
+
+ if (ip.v4->version == 4) {
+ l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ } else if (ip.v4->version == 6) {
+ l3_type = IPV6_PKT;
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+ tunnel_type = TUNNEL_UDP_CSUM;
+#endif
+ } else {
+ l3_type = UNKNOWN_L3TYPE;
}
- skb->len = MIN_SKB_LEN;
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
}
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM,
+ &l3_type, &l4_proto);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto,
+ &l4_offload, &l4_len, &offset);
+
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+	/* skb_network_header_len() returns a wrong length here, so recompute it */
+ if (unlikely(l3_type == UNKNOWN_L3TYPE))
+ network_hdr_len = 0;
+ else
+ network_hdr_len = l4.hdr - ip.hdr;
+
+	/* payload offset must be set */
+ if (unlikely(!offset)) {
+ if (l3_type == UNKNOWN_L3TYPE)
+ offset = ip.hdr - skb->data;
+ else if (l4_offload == OFFLOAD_DISABLE)
+ offset = ip.hdr - skb->data + network_hdr_len;
+ }
+#endif
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
- if (nr_sges > txq->max_sges) {
- netdev_err(netdev, "Too many Tx sges\n");
- goto skb_error;
+ hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+ return 1;
+}
+
+static __sum16 csum_magic(union hinic_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
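+
+/* For TSO/checksum offload the hardware finalizes the L4 checksum per
+ * segment, but expects the checksum field to be pre-seeded with the
+ * pseudo-header sum (addresses and protocol, zero length); callers store
+ * ~csum_magic() into l4->check for exactly that purpose.
+ */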
+
+static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ union hinic_ip ip;
+ union hinic_l4 l4;
+ enum sq_l3_type l3_type;
+ enum sq_l4offload_type l4_offload;
+ u32 network_hdr_len;
+ u32 offset, l4_len;
+ u32 ip_identify = 0;
+ u8 l4_proto;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+ u32 tunnel_type = 0;
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ } else if (ip.v4->version == 6) {
+ l3_type = IPV6_PKT;
+ } else {
+ l3_type = 0;
+ }
+
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ tunnel_type = TUNNEL_UDP_CSUM;
+ } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+ if (l3_type == IPV6_PKT) {
+ tunnel_type = TUNNEL_UDP_CSUM;
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ } else {
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ }
+#else
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+#endif
+ }
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4)
+ ip.v4->tot_len = 0;
+ else
+ ip.v6->payload_len = 0;
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO,
+ &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO
+ else if (l4_proto == IPPROTO_UDP && ip.v4->version == 6)
+ ip_identify = be32_to_cpu(skb_shinfo(skb)->ip6_frag_id);
+	/* changed to big endian just to keep the same code style here */
+#endif
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto,
+ &l4_offload, &l4_len, &offset);
+
+#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
+ if (unlikely(l3_type == UNKNOWN_L3TYPE))
+ network_hdr_len = 0;
+ else
+ network_hdr_len = l4.hdr - ip.hdr;
+
+ if (unlikely(!offset)) {
+ if (l3_type == UNKNOWN_L3TYPE)
+ offset = ip.hdr - skb->data;
+ else if (l4_offload == OFFLOAD_DISABLE)
+ offset = ip.hdr - skb->data + network_hdr_len;
}
+#endif
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
- err = tx_map_skb(nic_dev, skb, txq->sges);
+ hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len,
+ offset, ip_identify, skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static enum tx_offload_type hinic_tx_offload(struct sk_buff *skb,
+ struct hinic_sq_task *task,
+ u32 *queue_info, u8 avd_flag)
+{
+ enum tx_offload_type offload = 0;
+ int tso_cs_en;
+ u16 vlan_tag;
+
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
+
+ tso_cs_en = hinic_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic_tx_csum(task, queue_info, skb);
+ if (tso_cs_en)
+ offload |= TX_OFFLOAD_CSUM;
+ }
+
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ vlan_tag = skb_vlan_tag_get(skb);
+ hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
+ offload |= TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ MAX_PAYLOAD_OFFSET)) {
+ offload = TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ if (avd_flag == HINIC_TX_UFO_AVD)
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, UFO_AVD);
+
+ if (offload) {
+ hinic_task_set_tx_offload_valid(task, skb_network_offset(skb));
+ task->pkt_info0 = be32_to_cpu(task->pkt_info0);
+ task->pkt_info1 = be32_to_cpu(task->pkt_info1);
+ task->pkt_info2 = be32_to_cpu(task->pkt_info2);
+ }
+
+ return offload;
+}
+
+static inline void __get_pkt_stats(struct hinic_tx_info *tx_info,
+ struct sk_buff *skb)
+{
+ u32 ihs, hdr_len;
+
+ if (skb_is_gso(skb)) {
+#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
+ defined(HAVE_SK_BUFF_ENCAPSULATION))
+ if (skb->encapsulation) {
+#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET
+ ihs = skb_inner_transport_offset(skb) +
+ inner_tcp_hdrlen(skb);
+#else
+ ihs = (skb_inner_transport_header(skb) - skb->data) +
+ inner_tcp_hdrlen(skb);
+#endif
+ } else {
+#endif
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
+ defined(HAVE_SK_BUFF_ENCAPSULATION))
+ }
+#endif
+ hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ tx_info->num_bytes = skb->len + (u64)hdr_len;
+
+ } else {
+ tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ }
+
+ tx_info->num_pkts = 1;
+}
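+
+/* Worked example (illustrative): a GSO skb with gso_segs = 4 and a
+ * 54-byte transport offset plus TCP header (ihs) is accounted as
+ * skb->len + 3 * 54 bytes, since the hardware replicates the headers
+ * for every segment after the first.
+ */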
+
+inline u8 hinic_get_vlan_pri(struct sk_buff *skb)
+{
+ u16 vlan_tci = 0;
+ int err;
+
+ err = vlan_get_tag(skb, &vlan_tci);
if (err)
- goto skb_error;
+ return 0;
+
+ return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static void *__try_to_get_wqe(struct net_device *netdev, u16 q_id,
+ int wqebb_cnt, u16 *pi, u8 *owner)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ void *wqe = NULL;
+
+ netif_stop_subqueue(netdev, q_id);
+	/* We need to check again in case another CPU has just
+ * made room available.
+ */
+ if (unlikely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
+ wqebb_cnt)) {
+ netif_start_subqueue(netdev, q_id);
+		/* there are enough wqebbs after the queue is woken up */
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
+ wqebb_cnt, pi, owner);
+ }
+
+ return wqe;
+}
- wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO
+static int hinic_ufo_avoidance(struct sk_buff *skb, struct sk_buff **ufo_skb,
+ struct net_device *netdev, struct hinic_txq *txq)
+{
+ __be32 ip6_frag_id = skb_shinfo(skb)->ip6_frag_id;
+ u16 gso_size = skb_shinfo(skb)->gso_size;
+ struct frag_hdr ipv6_fhdr = {0};
+ union hinic_ip ip;
+ u32 l2_l3_hlen, l3_payload_len, extra_len;
+ u32 skb_len, nsize, frag_data_len = 0;
+ u16 ipv6_fhl_ext = sizeof(struct frag_hdr);
+ __wsum frag_data_csum = 0;
+ u16 frag_offset = 0;
+ u8 *tmp = NULL;
+ int err;
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (!sq_wqe) {
- netif_stop_subqueue(netdev, qp->q_id);
+ ip.hdr = skb_network_header(skb);
+ if (ip.v6->version == 6 && ip.v6->nexthdr != NEXTHDR_UDP) {
+ TXQ_STATS_INC(txq, ufo_pkt_unsupport);
+ return -EPROTONOSUPPORT;
+ }
- /* Check for the case free_tx_poll is called in another cpu
- * and we stopped the subqueue after free_tx_poll check.
- */
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (sq_wqe) {
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
- goto process_sq_wqe;
+ /* linearize the big ufo packet */
+ err = skb_linearize(skb);
+ if (err) {
+ TXQ_STATS_INC(txq, ufo_linearize_err);
+ return err;
+ }
+
+ extra_len = skb->len - HINIC_GSO_MAX_SIZE;
+ l2_l3_hlen = (u32)(skb_transport_header(skb) - skb_mac_header(skb));
+ l3_payload_len = skb->len - l2_l3_hlen;
+ frag_data_len += extra_len +
+ ((l3_payload_len - extra_len) % gso_size);
+ skb_len = skb->len - frag_data_len;
+
+ /* ipv6 need external frag header */
+ if (ip.v6->version == 6)
+ nsize = frag_data_len + l2_l3_hlen + ipv6_fhl_ext;
+ else
+ nsize = frag_data_len + l2_l3_hlen;
+
+ *ufo_skb = netdev_alloc_skb_ip_align(netdev, nsize);
+ if (!*ufo_skb) {
+ TXQ_STATS_INC(txq, ufo_alloc_skb_err);
+ return -ENOMEM;
+ }
+
+ /* copy l2_l3 layer header from original skb to ufo_skb */
+ skb_copy_from_linear_data_offset(skb, 0, skb_put(*ufo_skb, l2_l3_hlen),
+ l2_l3_hlen);
+
+ /* reserve ipv6 external frag header for ufo_skb */
+ if (ip.v6->version == 6) {
+ ipv6_fhdr.nexthdr = NEXTHDR_UDP;
+ ipv6_fhdr.reserved = 0;
+ frag_offset = (u16)(l3_payload_len - frag_data_len);
+ ipv6_fhdr.frag_off = htons(frag_offset);
+ ipv6_fhdr.identification = ip6_frag_id;
+ tmp = skb_put(*ufo_skb, ipv6_fhl_ext);
+ memcpy(tmp, &ipv6_fhdr, ipv6_fhl_ext);
+ }
+
+ /* split original one skb to two parts: skb and ufo_skb */
+ skb_split(skb, (*ufo_skb), skb_len);
+
+ /* modify skb ip total len */
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4)
+ ip.v4->tot_len = htons(ntohs(ip.v4->tot_len) -
+ (u16)frag_data_len);
+ else
+ ip.v6->payload_len = htons(ntohs(ip.v6->payload_len) -
+ (u16)frag_data_len);
+
+ /* set ufo_skb network header */
+ skb_set_network_header(*ufo_skb, skb_network_offset(skb));
+
+ /* set vlan offload feature */
+ (*ufo_skb)->vlan_tci = skb->vlan_tci;
+
+ /* modify ufo_skb ip total len, flag, frag_offset and compute csum */
+ ip.hdr = skb_network_header(*ufo_skb);
+ if (ip.v4->version == 4) {
+ /* compute ufo_skb data csum and put into skb udp csum */
+ tmp = (*ufo_skb)->data + l2_l3_hlen;
+ frag_data_csum = csum_partial(tmp, (int)frag_data_len, 0);
+ udp_hdr(skb)->check =
+ (__sum16)csum_add(~csum_fold(frag_data_csum),
+ udp_hdr(skb)->check);
+
+ ip.v4->tot_len = htons((u16)(skb_network_header_len(skb) +
+ frag_data_len));
+ ip.v4->frag_off = 0;
+ frag_offset = (u16)((l3_payload_len - frag_data_len) >> 3);
+ ip.v4->frag_off = htons(frag_offset);
+ ip_send_check(ip.v4);
+ } else {
+ /* compute ufo_skb data csum and put into skb udp csum */
+ tmp = (*ufo_skb)->data + l2_l3_hlen + ipv6_fhl_ext;
+ frag_data_csum = csum_partial(tmp, (int)frag_data_len, 0);
+ udp_hdr(skb)->check =
+ (__sum16)csum_add(~csum_fold(frag_data_csum),
+ udp_hdr(skb)->check);
+
+ ip.v6->payload_len = htons(ipv6_fhl_ext + (u16)frag_data_len);
+ ip.v6->nexthdr = NEXTHDR_FRAGMENT;
+ }
+
+ return 0;
+}
+#endif
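+
+/* Worked example (illustrative, assuming HINIC_GSO_MAX_SIZE is 65536):
+ * a 66000-byte UFO skb with gso_size = 1400 and l2_l3_hlen = 34 gives
+ * extra_len = 464, l3_payload_len = 65966 and frag_data_len =
+ * 464 + (65502 % 1400) = 1566, so 1566 trailing bytes move to ufo_skb
+ * and the original skb shrinks below the 64K hardware limit.
+ */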
+
+static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic_txq *txq,
+ u8 *flag, u8 avd_flag)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_tx_info *tx_info;
+ struct hinic_sq_wqe *wqe = NULL;
+ enum tx_offload_type offload = 0;
+ u16 q_id = txq->q_id;
+ u32 queue_info = 0;
+ u8 owner = 0;
+ u16 pi = 0;
+ int err, wqebb_cnt;
+ u16 num_sge = 0;
+ u16 skb_nr_frags = skb_shinfo(skb)->nr_frags;
+
+	/* skb->dev is not initialized when netdev_alloc_skb_ip_align is
+	 * called with a length parameter larger than PAGE_SIZE (under
+	 * redhat7.3), but skb->dev is used in vlan_get_tag and elsewhere
+	 */
+ if (unlikely(!skb->dev))
+ skb->dev = netdev;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) {
+ TXQ_STATS_INC(txq, skb_pad_err);
+ goto tx_skb_pad_err;
}
- tx_unmap_skb(nic_dev, skb, txq->sges);
+ skb->len = MIN_SKB_LEN;
+ }
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_busy++;
- u64_stats_update_end(&txq->txq_stats.syncp);
- err = NETDEV_TX_BUSY;
- wqe_size = 0;
- goto flush_skbs;
+ num_sge = skb_nr_frags + 1;
+
+	/* if skb->len is more than 65536B but num_sge is 1,
+	 * the driver will drop it
+	 */
+ if (unlikely(skb->len > HINIC_GSO_MAX_SIZE && num_sge == 1)) {
+ TXQ_STATS_INC(txq, frag_len_overflow);
+ goto tx_drop_pkts;
+ }
+
+	/* if the sge number is more than 17, the driver uses only 17 sges */
+ if (unlikely(num_sge > HINIC_MAX_SQ_SGE)) {
+ TXQ_STATS_INC(txq, big_frags_pkts);
+ num_sge = HINIC_MAX_SQ_SGE;
}
-process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ wqebb_cnt = HINIC_SQ_WQEBB_CNT(num_sge);
+ if (likely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
+ wqebb_cnt)) {
+ if (likely(wqebb_cnt == 1)) {
+ hinic_update_sq_pi(nic_dev->hwdev, q_id,
+ wqebb_cnt, &pi, &owner);
+ wqe = txq->tx_info[pi].wqe;
+ } else {
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
+ wqebb_cnt, &pi, &owner);
+ }
+
+ } else {
+ wqe = __try_to_get_wqe(netdev, q_id, wqebb_cnt, &pi, &owner);
+ if (likely(!wqe)) {
+ TXQ_STATS_INC(txq, busy);
+ return NETDEV_TX_BUSY;
+ }
+ }
- hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
-flush_skbs:
- netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
- if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
- hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+ __get_pkt_stats(tx_info, skb);
- return err;
+ offload = hinic_tx_offload(skb, &wqe->task, &queue_info, avd_flag);
+ if (unlikely(offload == TX_OFFLOAD_INVALID)) {
+ hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
+ TXQ_STATS_INC(txq, offload_cow_skb_err);
+ goto tx_drop_pkts;
+ }
+
+ err = tx_map_skb(nic_dev, skb, txq, tx_info, wqe->buf_descs);
+ if (err) {
+ hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
+ goto tx_drop_pkts;
+ }
-skb_error:
+ hinic_prepare_sq_ctrl(&wqe->ctrl, queue_info, num_sge, owner);
+
+ hinic_send_sq_wqe(nic_dev->hwdev, q_id, wqe, wqebb_cnt,
+ nic_dev->sq_cos_mapping[hinic_get_vlan_pri(skb)]);
+
+ return NETDEV_TX_OK;
+
+tx_drop_pkts:
dev_kfree_skb_any(skb);
-update_error_stats:
+tx_skb_pad_err:
+ TXQ_STATS_INC(txq, dropped);
+
+ *flag = HINIC_TX_DROPED;
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_txq *txq;
+ u16 q_id = skb_get_queue_mapping(skb);
+ u8 flag = 0;
+#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO
+ struct sk_buff *ufo_skb;
+ int err;
+#endif
+
+ if (unlikely((!netif_carrier_ok(netdev) &&
+ !test_bit(HINIC_LP_TEST, &nic_dev->flags)) ||
+ !nic_dev->heart_status)) {
+ dev_kfree_skb_any(skb);
+ HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(q_id >= nic_dev->num_qps)) {
+ txq = &nic_dev->txqs[0];
+ HINIC_NIC_STATS_INC(nic_dev, tx_invalid_qid);
+ goto tx_drop_pkts;
+ }
+ txq = &nic_dev->txqs[q_id];
+
+#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO
+ /* UFO avoidance */
+ if (unlikely(skb->len > HINIC_GSO_MAX_SIZE &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP))) {
+ TXQ_STATS_INC(txq, big_udp_pkts);
+
+ err = hinic_ufo_avoidance(skb, &ufo_skb, netdev, txq);
+ if (err)
+ goto tx_drop_pkts;
+
+ err = hinic_send_one_skb(skb, netdev, txq, &flag,
+ HINIC_TX_UFO_AVD);
+ if (err == NETDEV_TX_BUSY) {
+ dev_kfree_skb_any(ufo_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (flag == HINIC_TX_DROPED) {
+			nicif_err(nic_dev, drv, netdev, "Send first skb failed for HINIC_TX_DROPED\n");
+ dev_kfree_skb_any(ufo_skb);
+ return NETDEV_TX_OK;
+ }
+
+ err = hinic_send_one_skb(ufo_skb, netdev, txq, &flag,
+ HINIC_TX_NON_AVD);
+ if (err == NETDEV_TX_BUSY) {
+			nicif_err(nic_dev, drv, netdev, "Send second skb failed for NETDEV_TX_BUSY\n");
+ dev_kfree_skb_any(ufo_skb);
+ return NETDEV_TX_OK;
+ }
+
+ return NETDEV_TX_OK;
+ }
+#endif
+
+ return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD);
+
+tx_drop_pkts:
+ dev_kfree_skb_any(skb);
u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_dropped++;
+ txq->txq_stats.dropped++;
u64_stats_update_end(&txq->txq_stats.syncp);
- return err;
+
+ return NETDEV_TX_OK;
}
-/**
- * tx_free_skb - unmap and free skb
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: the sges that are connected to the skb
- **/
-static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+static inline void tx_free_skb(struct hinic_nic_dev *nic_dev,
+ struct sk_buff *skb,
+ struct hinic_tx_info *tx_info)
{
- tx_unmap_skb(nic_dev, skb, sges);
+ tx_unmap_skb(nic_dev, skb, tx_info->dma_len);
+ kfree(tx_info->cpy_buff);
+ tx_info->cpy_buff = NULL;
dev_kfree_skb_any(skb);
}
-/**
- * free_all_rx_skbs - free all skbs in tx queue
- * @txq: tx queue
- **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct hinic_sq *sq = txq->sq;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- struct sk_buff *skb;
- int nr_sges;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct hinic_tx_info *tx_info;
u16 ci;
+ int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
+ txq->q_id) + 1;
- while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
- sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
- if (!sq_wqe)
- break;
+ while (free_wqebbs < txq->q_depth) {
+ ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id);
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ tx_info = &txq->tx_info[ci];
- hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+ tx_free_skb(nic_dev, tx_info->skb, tx_info);
- hinic_sq_put_wqe(sq, wqe_size);
+ hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id,
+ tx_info->wqebb_cnt);
- tx_free_skb(nic_dev, skb, txq->free_sges);
+ free_wqebbs += tx_info->wqebb_cnt;
}
}
-/**
- * free_tx_poll - free finished tx skbs in tx queue that connected to napi
- * @napi: napi
- * @budget: number of tx
- *
- * Return 0 - Success, negative - Failure
- **/
-static int free_tx_poll(struct napi_struct *napi, int budget)
+int hinic_tx_poll(struct hinic_txq *txq, int budget)
{
- struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
- struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct netdev_queue *netdev_txq;
- struct hinic_sq *sq = txq->sq;
- struct hinic_wq *wq = sq->wq;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- int nr_sges, pkts = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
struct sk_buff *skb;
- u64 tx_bytes = 0;
- u16 hw_ci, sw_ci;
+ struct hinic_tx_info *tx_info;
+ u64 tx_bytes = 0, wake = 0;
+ int pkts = 0, nr_pkts = 0, wqebb_cnt = 0;
+ u16 hw_ci, sw_ci = 0, q_id = txq->q_id;
+
+ hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
+ sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
do {
- hw_ci = HW_CONS_IDX(sq) & wq->mask;
+ tx_info = &txq->tx_info[sw_ci];
- /* Reading a WQEBB to get real WQE size and consumer index. */
- sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
- if ((!sq_wqe) ||
- (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
+ /* Whether all of the wqebb of this wqe is completed */
+ if (hw_ci == sw_ci || ((hw_ci - sw_ci) &
+ txq->q_mask) < tx_info->wqebb_cnt) {
break;
-
- /* If this WQE have multiple WQEBBs, we will read again to get
- * full size WQE.
- */
- if (wqe_size > wq->wqebb_size) {
- sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
- if (unlikely(!sq_wqe))
- break;
}
- tx_bytes += skb->len;
- pkts++;
-
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ prefetch(&txq->tx_info[sw_ci]);
- hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+ wqebb_cnt += tx_info->wqebb_cnt;
- hinic_sq_put_wqe(sq, wqe_size);
+ skb = tx_info->skb;
+ tx_bytes += tx_info->num_bytes;
+ nr_pkts += tx_info->num_pkts;
+ pkts++;
- tx_free_skb(nic_dev, skb, txq->free_sges);
- } while (pkts < budget);
+ tx_free_skb(nic_dev, skb, tx_info);
- if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
- hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
- netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
+ } while (likely(pkts < budget));
- __netif_tx_lock(netdev_txq, smp_processor_id());
+ hinic_update_sq_local_ci(nic_dev->hwdev, q_id, wqebb_cnt);
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) &&
+ hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= 1 &&
+ test_bit(HINIC_INTF_UP, &nic_dev->flags))) {
+ struct netdev_queue *netdev_txq =
+ netdev_get_tx_queue(txq->netdev, q_id);
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+ /* To avoid re-waking subqueue with xmit_frame */
+ if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) {
+ netif_wake_subqueue(nic_dev->netdev, q_id);
+ wake++;
+ }
__netif_tx_unlock(netdev_txq);
-
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_wake++;
- u64_stats_update_end(&txq->txq_stats.syncp);
}
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.bytes += tx_bytes;
- txq->txq_stats.pkts += pkts;
+ txq->txq_stats.packets += nr_pkts;
+ txq->txq_stats.wake += wake;
u64_stats_update_end(&txq->txq_stats.syncp);
- if (pkts < budget) {
- napi_complete(napi);
- enable_irq(sq->irq);
- return pkts;
- }
-
- return budget;
+ return pkts;
}
-static void tx_napi_add(struct hinic_txq *txq, int weight)
+int hinic_setup_tx_wqe(struct hinic_txq *txq)
{
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
- napi_enable(&txq->napi);
-}
-
-static void tx_napi_del(struct hinic_txq *txq)
-{
- napi_disable(&txq->napi);
- netif_napi_del(&txq->napi);
-}
+ struct net_device *netdev = txq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sq_wqe *wqe;
+ struct hinic_tx_info *tx_info;
+ u16 pi = 0;
+ int i;
+ u8 owner = 0;
-static irqreturn_t tx_irq(int irq, void *data)
-{
- struct hinic_txq *txq = data;
- struct hinic_dev *nic_dev;
+ for (i = 0; i < txq->q_depth; i++) {
+ tx_info = &txq->tx_info[i];
- nic_dev = netdev_priv(txq->netdev);
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, txq->q_id,
+ 1, &pi, &owner);
+ if (!wqe) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get SQ wqe\n");
+ break;
+ }
- /* Disable the interrupt until napi will be completed */
- disable_irq_nosync(txq->sq->irq);
+ tx_info->wqe = wqe;
+ }
- hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
+ hinic_return_sq_wqe(nic_dev->hwdev, txq->q_id, txq->q_depth, owner);
- napi_schedule(&txq->napi);
- return IRQ_HANDLED;
+ return i;
}
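+
+/* hinic_setup_tx_wqe() walks the whole SQ once so each tx_info[pi] caches
+ * its wqe address, then hands all q_depth wqebbs back via
+ * hinic_return_sq_wqe(); the single-wqebb fast path in hinic_send_one_skb()
+ * can then fetch the wqe by pi without calling hinic_get_sq_wqe() again.
+ */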
-static int tx_request_irq(struct hinic_txq *txq)
+int hinic_setup_all_tx_resources(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_sq *sq = txq->sq;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_txq *txq;
+ u64 tx_info_sz;
+ u16 i, q_id;
int err;
- tx_napi_add(txq, nic_dev->tx_weight);
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
+ if (!tx_info_sz) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size tx%d info\n",
+ q_id);
+ err = -EINVAL;
+ goto init_txq_err;
+ }
- hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
- TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
- TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
- TX_IRQ_NO_RESEND_TIMER);
+ txq->tx_info = kzalloc(tx_info_sz, GFP_KERNEL);
+ if (!txq->tx_info) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate Tx:%d info\n",
+ q_id);
+ err = -ENOMEM;
+ goto init_txq_err;
+ }
- err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
- if (err) {
- dev_err(&pdev->dev, "Failed to request Tx irq\n");
- tx_napi_del(txq);
- return err;
+ err = hinic_setup_tx_wqe(txq);
+ if (err != txq->q_depth) {
+ nicif_err(nic_dev, drv, netdev, "Failed to setup Tx:%d wqe\n",
+ q_id);
+ q_id++;
+ goto init_txq_err;
+ }
}
return 0;
+
+init_txq_err:
+ for (i = 0; i < q_id; i++) {
+ txq = &nic_dev->txqs[i];
+ kfree(txq->tx_info);
+ }
+
+ return err;
}
-static void tx_free_irq(struct hinic_txq *txq)
+void hinic_free_all_tx_resources(struct net_device *netdev)
{
- struct hinic_sq *sq = txq->sq;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_txq *txq;
+ u16 q_id;
- free_irq(sq->irq, txq);
- tx_napi_del(txq);
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ free_all_tx_skbs(txq);
+ kfree(txq->tx_info);
+ }
}
-/**
- * hinic_init_txq - Initialize the Tx Queue
- * @txq: Logical Tx Queue
- * @sq: Hardware Tx Queue to connect the Logical queue with
- * @netdev: network device to connect the Logical queue with
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
- struct net_device *netdev)
+void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id)
{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- int err, irqname_len;
- size_t sges_size;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int up;
- txq->netdev = netdev;
- txq->sq = sq;
-
- txq_stats_init(txq);
+ for (up = HINIC_DCB_UP_MAX - 1; up >= 0; up--)
+ nic_dev->sq_cos_mapping[up] = nic_dev->default_cos_id;
+}
- txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
+int hinic_sq_cos_mapping(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_state dcb_state = {0};
+ u8 default_cos = 0;
+ int err;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
- if (!txq->sges)
- return -ENOMEM;
+ if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ err = hinic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state);
+ if (err) {
+ hinic_info(nic_dev, drv, "Failed to get vf default cos\n");
+ return err;
+ }
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
- if (!txq->free_sges) {
- err = -ENOMEM;
- goto err_alloc_free_sges;
+ default_cos = dcb_state.default_cos;
+ nic_dev->default_cos_id = default_cos;
+ hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
+ } else {
+ default_cos = nic_dev->default_cos_id;
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ memcpy(nic_dev->sq_cos_mapping, nic_dev->up_cos,
+ sizeof(nic_dev->sq_cos_mapping));
+ else
+ hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
+
+ dcb_state.dcb_on = !!test_bit(HINIC_DCB_ENABLE,
+ &nic_dev->flags);
+ dcb_state.default_cos = default_cos;
+ memcpy(dcb_state.up_cos, nic_dev->sq_cos_mapping,
+ sizeof(dcb_state.up_cos));
+
+ err = hinic_set_dcb_state(nic_dev->hwdev, &dcb_state);
+ if (err)
+ hinic_info(nic_dev, drv, "Failed to set vf default cos\n");
}
- irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
- txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
- if (!txq->irq_name) {
- err = -ENOMEM;
- goto err_alloc_irqname;
+ return err;
+}
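
hinic_sq_cos_mapping() reduces to a simple policy: with DCB enabled on a PF, each user priority (UP) keeps its configured CoS from up_cos; otherwise (and on VFs) every UP collapses to the default CoS. A standalone sketch of that policy, assuming HINIC_DCB_UP_MAX is 8 (its value is not shown in this hunk):

#include <stdio.h>
#include <string.h>

#define HINIC_DCB_UP_MAX 8	/* assumed for illustration */

/* Illustrative restatement of the mapping built above */
static void build_sq_cos_mapping(unsigned char *sq_cos_mapping,
				 const unsigned char *up_cos,
				 unsigned char default_cos, int dcb_on)
{
	int up;

	if (dcb_on) {
		memcpy(sq_cos_mapping, up_cos, HINIC_DCB_UP_MAX);
		return;
	}

	for (up = 0; up < HINIC_DCB_UP_MAX; up++)
		sq_cos_mapping[up] = default_cos;
}

int main(void)
{
	unsigned char up_cos[HINIC_DCB_UP_MAX] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned char map[HINIC_DCB_UP_MAX];

	build_sq_cos_mapping(map, up_cos, 0, 0);
	printf("DCB off: UP 5 -> CoS %u\n", map[5]);	/* 0 */
	build_sq_cos_mapping(map, up_cos, 0, 1);
	printf("DCB on:  UP 5 -> CoS %u\n", map[5]);	/* 5 */
	return 0;
}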
+
+int hinic_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_txq *txq;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ u64 txq_size;
+
+ txq_size = num_txqs * sizeof(*nic_dev->txqs);
+ if (!txq_size) {
+ nic_err(&pdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
}
- sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
+ nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL);
- err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
- CI_UPDATE_NO_COALESC);
- if (err)
- goto err_hw_ci;
+ if (!nic_dev->txqs) {
+ nic_err(&pdev->dev, "Failed to allocate txqs\n");
+ return -ENOMEM;
+ }
- err = tx_request_irq(txq);
- if (err) {
- netdev_err(netdev, "Failed to request Tx irq\n");
- goto err_req_tx_irq;
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->sq_depth;
+ txq->q_mask = nic_dev->sq_depth - 1;
+
+ txq_stats_init(txq);
}
return 0;
+}
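
Note that q_mask = sq_depth - 1 is only a valid ring mask when sq_depth is a power of two; this patch relies on the depth being validated wherever it is configured. A hedged sketch of such a guard (hinic_check_sq_depth is hypothetical, not part of this patch):

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/types.h>

/* Hypothetical guard: reject depths for which (sq_depth - 1)
 * would not be a usable mask in hinic_alloc_txqs().
 */
static int hinic_check_sq_depth(u32 sq_depth)
{
	if (!is_power_of_2(sq_depth))
		return -EINVAL;

	return 0;
}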
-err_req_tx_irq:
-err_hw_ci:
- devm_kfree(&netdev->dev, txq->irq_name);
-
-err_alloc_irqname:
- devm_kfree(&netdev->dev, txq->free_sges);
+void hinic_free_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
-err_alloc_free_sges:
- devm_kfree(&netdev->dev, txq->sges);
- return err;
+ kfree(nic_dev->txqs);
}
-/**
- * hinic_clean_txq - Clean the Tx Queue
- * @txq: Logical Tx Queue
- **/
-void hinic_clean_txq(struct hinic_txq *txq)
+/* The caller must stop transmitting packets before calling this function */
+#define HINIC_FLUSH_QUEUE_TIMEOUT 1000
+
+int hinic_stop_sq(struct hinic_txq *txq)
{
- struct net_device *netdev = txq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ unsigned long timeout;
+ int free_wqebbs, err;
+
+ timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+ do {
+ free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
+ txq->q_id) + 1;
+ if (free_wqebbs == txq->q_depth)
+ return 0;
+
+ usleep_range(900, 1000);
+ } while (time_before(jiffies, timeout));
+
+ /* force hardware to drop packets */
+ timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+ do {
+ free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
+ txq->q_id) + 1;
+ if (free_wqebbs == txq->q_depth)
+ return 0;
+
+ err = hinic_force_drop_tx_pkt(nic_dev->hwdev);
+ if (err)
+ break;
- tx_free_irq(txq);
+ usleep_range(9900, 10000);
+ } while (time_before(jiffies, timeout));
- free_all_tx_skbs(txq);
+ /* Check once more in case the sleeps above overshot the timeout */
+ free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev, txq->q_id) + 1;
+ if (free_wqebbs == txq->q_depth)
+ return 0;
- devm_kfree(&netdev->dev, txq->irq_name);
- devm_kfree(&netdev->dev, txq->free_sges);
- devm_kfree(&netdev->dev, txq->sges);
+ return -EFAULT;
}
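
hinic_stop_sq() applies the usual jiffies deadline-poll pattern twice, plus one final check after the loop so that an oversleep cannot be mistaken for a timeout. The pattern in isolation (cond() is a placeholder for "all WQEBBs free"; this helper is illustrative, not driver code):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Generic deadline poll, as used twice in hinic_stop_sq() above */
static int poll_until(bool (*cond)(void *), void *arg, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (cond(arg))
			return 0;
		usleep_range(900, 1000);
	} while (time_before(jiffies, deadline));

	/* Final check: the last sleep may have crossed the deadline */
	return cond(arg) ? 0 : -ETIMEDOUT;
}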
+
+int hinic_flush_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ err = hinic_stop_sq(&nic_dev->txqs[qid]);
+ if (err)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to stop sq%d\n", qid);
+ }
+
+ return 0;
+} /*lint -e766*/
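
hinic_flush_txqs() logs per-queue drain failures but still returns 0, so callers treat the flush as best-effort. Its precondition (no packets are being transmitted) is naturally met by quiescing the software tx path first. A hypothetical stop-path excerpt showing that ordering (hinic_close_txpath is not a function in this patch):

#include <linux/netdevice.h>
#include "hinic_tx.h"

/* Hypothetical ordering sketch for the device stop path */
static void hinic_close_txpath(struct net_device *netdev)
{
	netif_tx_disable(netdev);		/* stop all tx subqueues */
	hinic_flush_txqs(netdev);		/* drain or force-drop in-flight tx */
	hinic_free_all_tx_resources(netdev);	/* free skbs and tx_info */
}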
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index 1fa55dce5aa7..89fce0849e6e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -16,47 +16,105 @@
#ifndef HINIC_TX_H
#define HINIC_TX_H
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/u64_stats_sync.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_qp.h"
+enum tx_offload_type {
+ TX_OFFLOAD_TSO = BIT(0),
+ TX_OFFLOAD_CSUM = BIT(1),
+ TX_OFFLOAD_VLAN = BIT(2),
+ TX_OFFLOAD_INVALID = BIT(3),
+};
struct hinic_txq_stats {
- u64 pkts;
- u64 bytes;
- u64 tx_busy;
- u64 tx_wake;
- u64 tx_dropped;
+ u64 packets;
+ u64 bytes;
+ u64 busy;
+ u64 wake;
+ u64 dropped;
+ u64 big_frags_pkts;
+ u64 big_udp_pkts;
+
+ /* Detailed statistics shown by the private tool */
+ u64 ufo_pkt_unsupport;
+ u64 ufo_linearize_err;
+ u64 ufo_alloc_skb_err;
+ u64 skb_pad_err;
+ u64 frag_len_overflow;
+ u64 offload_cow_skb_err;
+ u64 alloc_cpy_frag_err;
+ u64 map_cpy_frag_err;
+ u64 map_frag_err;
+
+#ifdef HAVE_NDO_GET_STATS64
+ struct u64_stats_sync syncp;
+#else
+ struct u64_stats_sync_empty syncp;
+#endif
+};
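
These counters are updated under u64_stats_update_begin()/u64_stats_update_end() in the poll path above, so any reader must take a consistent snapshot with the matching fetch/retry pair. A minimal reader-side sketch, assuming the HAVE_NDO_GET_STATS64 branch (a real syncp) and kernel context:

#include <linux/u64_stats_sync.h>

/* Snapshot packets/bytes consistently against a concurrent writer */
static void txq_stats_snapshot(struct hinic_txq_stats *stats,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}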
+
+struct hinic_dma_len {
+ dma_addr_t dma;
+ u32 len;
+};
+
+#define MAX_SGE_NUM_PER_WQE 17
+
+struct hinic_tx_info {
+ struct sk_buff *skb;
+
+ int wqebb_cnt;
- struct u64_stats_sync syncp;
+ int num_sge;
+ void *wqe;
+ u8 *cpy_buff;
+ u16 num_pkts;
+ u64 num_bytes;
+ struct hinic_dma_len dma_len[MAX_SGE_NUM_PER_WQE];
};
struct hinic_txq {
- struct net_device *netdev;
- struct hinic_sq *sq;
+ struct net_device *netdev;
- struct hinic_txq_stats txq_stats;
+ u16 q_id;
+ u16 q_depth;
+ u16 q_mask;
+ struct hinic_txq_stats txq_stats;
- int max_sges;
- struct hinic_sge *sges;
- struct hinic_sge *free_sges;
+ struct hinic_tx_info *tx_info;
+};
+
+enum hinic_tx_xmit_status {
+ HINIC_TX_OK = 0,
+ HINIC_TX_DROPED = 1,
+ HINIC_TX_BUSY = 2,
+};
- char *irq_name;
- struct napi_struct napi;
+enum hinic_tx_avd_type {
+ HINIC_TX_NON_AVD = 0,
+ HINIC_TX_UFO_AVD = 1,
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
+void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats);
-void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
+void hinic_txq_get_stats(struct hinic_txq *txq,
+ struct hinic_txq_stats *stats);
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
- struct net_device *netdev);
+int hinic_setup_all_tx_resources(struct net_device *netdev);
+
+void hinic_free_all_tx_resources(struct net_device *netdev);
+
+void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id);
+
+int hinic_sq_cos_mapping(struct net_device *netdev);
+
+int hinic_alloc_txqs(struct net_device *netdev);
+
+void hinic_free_txqs(struct net_device *netdev);
+
+int hinic_tx_poll(struct hinic_txq *txq, int budget);
+
+u8 hinic_get_vlan_pri(struct sk_buff *skb);
-void hinic_clean_txq(struct hinic_txq *txq);
+int hinic_flush_txqs(struct net_device *netdev);
#endif
--
2.31.0