Take over from openEuler-22.03-LTS-SP2

Mike Müller 2025-06-28 11:56:44 +02:00
parent 9ac61c3230
commit e150186d7a
80 changed files with 82536 additions and 0 deletions

Kconfig (new file, 22 lines)

@@ -0,0 +1,22 @@
#
# Huawei driver configuration
#
config SCSI_HUAWEI_FC
tristate "Huawei Fibre Channel Adapter"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
depends on ARM64 || X86_64
default m
help
If you have a Fibre Channel PCI card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Huawei cards. If you say Y, you will be asked
for your specific card in the following questions.
if SCSI_HUAWEI_FC
source "drivers/scsi/huawei/hifc/Kconfig"
endif # SCSI_HUAWEI_FC

Makefile (new file, 5 lines)

@@ -0,0 +1,5 @@
#
# Makefile for the Huawei device drivers.
#
obj-$(CONFIG_SCSI_FC_HIFC) += hifc/

hifc/Kconfig (new file, 11 lines)

@@ -0,0 +1,11 @@
#
# Huawei driver configuration
#
config SCSI_FC_HIFC
tristate "Huawei hifc Fibre Channel Support"
default m
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
help
This driver supports Huawei Fibre Channel PCI and
PCIE host adapters.

hifc/Makefile (new file, 41 lines)

@@ -0,0 +1,41 @@
obj-$(CONFIG_SCSI_FC_HIFC) += hifc.o
hifc-objs += hifc_utils.o
hifc-objs += hifc_hba.o
hifc-objs += hifc_portmng.o
hifc-objs += hifc_module.o
hifc-objs += hifc_chipitf.o
hifc-objs += hifc_io.o
hifc-objs += hifc_queue.o
hifc-objs += hifc_service.o
hifc-objs += hifc_wqe.o
hifc-objs += hifc_cfg.o
hifc-objs += hifc_lld.o
hifc-objs += unf_io.o
hifc-objs += unf_io_abnormal.o
hifc-objs += unf_scsi.o
hifc-objs += unf_init.o
hifc-objs += unf_event.o
hifc-objs += unf_exchg.o
hifc-objs += unf_lport.o
hifc-objs += unf_disc.o
hifc-objs += unf_rport.o
hifc-objs += unf_service.o
hifc-objs += unf_portman.o
hifc-objs += unf_npiv.o
hifc-objs += hifc_sml.o
hifc-objs += hifc_tool.o
hifc-objs += hifc_tool_hw.o
hifc-objs += hifc_dbgtool_knl.o
hifc-objs += hifc_hwif.o
hifc-objs += hifc_eqs.o
hifc-objs += hifc_api_cmd.o
hifc-objs += hifc_mgmt.o
hifc-objs += hifc_wq.o
hifc-objs += hifc_cmdq.o
hifc-objs += hifc_hwdev.o
hifc-objs += hifc_cqm_main.o
hifc-objs += hifc_cqm_object.o

hifc/hifc_api_cmd.c (new file, 1154 lines)

Diff suppressed (file too large).

hifc/hifc_api_cmd.h (new file, 268 lines)

@@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_API_CMD_H_
#define HIFC_API_CMD_H_
#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
#define HIFC_API_CMD_CELL_CTRL_SET(val, member) \
((((u64)val) & HIFC_API_CMD_CELL_CTRL_##member##_MASK) << \
HIFC_API_CMD_CELL_CTRL_##member##_SHIFT)
#define HIFC_API_CMD_DESC_API_TYPE_SHIFT 0
#define HIFC_API_CMD_DESC_RD_WR_SHIFT 1
#define HIFC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
#define HIFC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
#define HIFC_API_CMD_DESC_PRIV_DATA_SHIFT 8
#define HIFC_API_CMD_DESC_DEST_SHIFT 32
#define HIFC_API_CMD_DESC_SIZE_SHIFT 40
#define HIFC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
#define HIFC_API_CMD_DESC_API_TYPE_MASK 0x1U
#define HIFC_API_CMD_DESC_RD_WR_MASK 0x1U
#define HIFC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
#define HIFC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
#define HIFC_API_CMD_DESC_DEST_MASK 0x1FU
#define HIFC_API_CMD_DESC_SIZE_MASK 0x7FFU
#define HIFC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
#define HIFC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
#define HIFC_API_CMD_DESC_SET(val, member) \
((((u64)val) & HIFC_API_CMD_DESC_##member##_MASK) << \
HIFC_API_CMD_DESC_##member##_SHIFT)
#define HIFC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
#define HIFC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
#define HIFC_API_CMD_STATUS_HEADER_GET(val, member) \
(((val) >> HIFC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
HIFC_API_CMD_STATUS_HEADER_##member##_MASK)
#define HIFC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
#define HIFC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
#define HIFC_API_CMD_CHAIN_REQ_SET(val, member) \
(((val) & HIFC_API_CMD_CHAIN_REQ_##member##_MASK) << \
HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)
#define HIFC_API_CMD_CHAIN_REQ_GET(val, member) \
(((val) >> HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
HIFC_API_CMD_CHAIN_REQ_##member##_MASK)
#define HIFC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
((val) & (~(HIFC_API_CMD_CHAIN_REQ_##member##_MASK \
<< HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_SET(val, member) \
(((val) & HIFC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
#define HIFC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
((val) & (~(HIFC_API_CMD_CHAIN_CTRL_##member##_MASK \
<< HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
#define HIFC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
#define HIFC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
#define HIFC_API_CMD_RESP_HEADER_VALID(val) \
(((val) & HIFC_API_CMD_RESP_HEAD_VALID_MASK) == \
HIFC_API_CMD_RESP_HEAD_VALID_CODE)
#define HIFC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
#define HIFC_API_CMD_STATUS_CONS_IDX_SHIFT 0
#define HIFC_API_CMD_STATUS_FSM_MASK 0xFU
#define HIFC_API_CMD_STATUS_FSM_SHIFT 24
#define HIFC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
#define HIFC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
#define HIFC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
#define HIFC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
#define HIFC_API_CMD_STATUS_GET(val, member) \
(((val) >> HIFC_API_CMD_STATUS_##member##_SHIFT) & \
HIFC_API_CMD_STATUS_##member##_MASK)
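/* Illustrative example (added; not part of the original header): how the
 * SET/GET helpers above compose and decode 64-bit words. Field values here
 * are made up for demonstration.
 */
#ifdef HIFC_API_CMD_EXAMPLE
static inline u64 hifc_example_build_desc(u8 dest, u16 size)
{
	u64 desc = 0;

	desc |= HIFC_API_CMD_DESC_SET(0, API_TYPE);     /* chain API type */
	desc |= HIFC_API_CMD_DESC_SET(0, RD_WR);        /* 0 = write */
	desc |= HIFC_API_CMD_DESC_SET(1, RESP_AEQE_EN); /* AEQ completion */
	desc |= HIFC_API_CMD_DESC_SET(dest, DEST);      /* 5-bit dest id */
	desc |= HIFC_API_CMD_DESC_SET(size, SIZE);      /* 11-bit size */

	return desc;
}

static inline u8 hifc_example_status_fsm(u64 status)
{
	/* extract the 4-bit FSM field from a chain status word */
	return (u8)HIFC_API_CMD_STATUS_GET(status, FSM);
}
#endif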
/* API CMD registers */
#define HIFC_CSR_API_CMD_BASE 0xF000
#define HIFC_CSR_API_CMD_STRIDE 0x100
#define HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x0 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x4 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x8 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0xC + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x10 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x14 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x1C + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x20 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_0_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x30 + (idx) * HIFC_CSR_API_CMD_STRIDE)
enum hifc_api_cmd_chain_type {
/* write command with completion notification */
HIFC_API_CMD_WRITE = 0,
/* read command with completion notification */
HIFC_API_CMD_READ = 1,
/* write to mgmt cpu command with completion */
HIFC_API_CMD_WRITE_TO_MGMT_CPU = 2,
/* multi read command with completion notification - not used */
HIFC_API_CMD_MULTI_READ = 3,
/* write command without completion notification */
HIFC_API_CMD_POLL_WRITE = 4,
/* read command without completion notification */
HIFC_API_CMD_POLL_READ = 5,
/* write to mgmt cpu command asynchronously */
HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
HIFC_API_CMD_MAX,
};
struct hifc_api_cmd_status {
u64 header;
u32 buf_desc;
u32 cell_addr_hi;
u32 cell_addr_lo;
u32 rsvd0;
u64 rsvd1;
};
/* HW struct */
struct hifc_api_cmd_cell {
u64 ctrl;
/* address is 64 bit in HW struct */
u64 next_cell_paddr;
u64 desc;
/* HW struct */
union {
struct {
u64 hw_cmd_paddr;
} write;
struct {
u64 hw_wb_resp_paddr;
u64 hw_cmd_paddr;
} read;
};
};
struct hifc_api_cmd_resp_fmt {
u64 header;
u64 rsvd[3];
u64 resp_data;
};
struct hifc_api_cmd_cell_ctxt {
struct hifc_api_cmd_cell *cell_vaddr;
void *api_cmd_vaddr;
struct hifc_api_cmd_resp_fmt *resp;
struct completion done;
int status;
u32 saved_prod_idx;
};
struct hifc_api_cmd_chain_attr {
struct hifc_hwdev *hwdev;
enum hifc_api_cmd_chain_type chain_type;
u32 num_cells;
u16 rsp_size;
u16 cell_size;
};
struct hifc_api_cmd_chain {
struct hifc_hwdev *hwdev;
enum hifc_api_cmd_chain_type chain_type;
u32 num_cells;
u16 cell_size;
u16 rsp_size;
/* HW members are in 24-bit format */
u32 prod_idx;
u32 cons_idx;
struct semaphore sem;
/* Async commands cannot sleep, so a spinlock is used */
spinlock_t async_lock;
dma_addr_t wb_status_paddr;
struct hifc_api_cmd_status *wb_status;
dma_addr_t head_cell_paddr;
struct hifc_api_cmd_cell *head_node;
struct hifc_api_cmd_cell_ctxt *cell_ctxt;
struct hifc_api_cmd_cell *curr_node;
struct hifc_dma_addr_align cells_addr;
u8 *cell_vaddr_base;
u64 cell_paddr_base;
u8 *rsp_vaddr_base;
u64 rsp_paddr_base;
u8 *buf_vaddr_base;
u64 buf_paddr_base;
u64 cell_size_align;
u64 rsp_size_align;
u64 buf_size_align;
};
int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain,
enum hifc_node_id dest, void *cmd, u16 size);
int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain,
enum hifc_node_id dest, void *cmd, u16 size,
void *ack, u16 ack_size);
int hifc_api_cmd_init(struct hifc_hwdev *hwdev,
struct hifc_api_cmd_chain **chain);
void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain);
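/* Illustrative caller sketch (added; not part of the original header):
 * a synchronous read through an initialized chain. HIFC_NODE_ID_MGMT is an
 * assumed member of enum hifc_node_id, which is defined elsewhere.
 */
#ifdef HIFC_API_CMD_EXAMPLE
static int hifc_example_read(struct hifc_api_cmd_chain *chain,
			     void *cmd, u16 size, void *ack, u16 ack_size)
{
	return hifc_api_cmd_read(chain, HIFC_NODE_ID_MGMT, cmd, size,
				 ack, ack_size);
}
#endif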
#endif

hifc/hifc_cfg.c (new file, 822 lines)

@@ -0,0 +1,822 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM] " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_cqm_main.h"
#include "hifc_api_cmd.h"
#include "hifc_hw.h"
#include "hifc_mgmt.h"
#include "hifc_cfg.h"
static uint intr_mode;
int hifc_sync_time(void *hwdev, u64 time)
{
struct hifc_sync_time_info time_info = {0};
u16 out_size = sizeof(time_info);
int err;
time_info.mstime = time;
err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
HIFC_MGMT_CMD_SYNC_TIME, &time_info,
sizeof(time_info), &time_info, &out_size,
0);
if (err || time_info.status || !out_size) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n",
err, time_info.status, out_size);
}
return err;
}
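/* Illustrative caller sketch (added; not part of the original file):
 * pushing the host wall-clock time to the management CPU. The millisecond
 * unit is an assumption inferred from the "mstime" field name; needs
 * <linux/ktime.h>.
 */
#ifdef HIFC_CFG_EXAMPLE
static void hifc_example_sync_host_time(void *hwdev)
{
	u64 mstime = ktime_get_real_ns();

	do_div(mstime, NSEC_PER_MSEC);	/* ns -> ms */
	if (hifc_sync_time(hwdev, mstime))
		pr_warn("hifc example: time sync with mgmt CPU failed\n");
}
#endif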
static void parse_pub_res_cap(struct service_cap *cap,
struct hifc_dev_cap *dev_cap,
enum func_type type)
{
cap->port_id = dev_cap->port_id;
cap->force_up = dev_cap->force_up;
pr_info("Get public resource capbility, force_up: 0x%x\n",
cap->force_up);
/* FC needs the max queue number, but that information lives in the
 * l2nic capability; it is duplicated into the public capability so
 * FC can retrieve the correct max queue number.
 */
cap->max_sqs = dev_cap->nic_max_sq + 1;
cap->max_rqs = dev_cap->nic_max_rq + 1;
cap->host_total_function = dev_cap->host_total_func;
cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
cap->max_connect_num = dev_cap->max_conn_num;
cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
pr_info("Get public resource capbility, svc_cap_en: 0x%x\n",
dev_cap->svc_cap_en);
pr_info("port_id=0x%x\n", cap->port_id);
pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x\n",
cap->host_total_function, cap->host_oq_id_mask_val);
}
static void parse_fc_res_cap(struct service_cap *cap,
struct hifc_dev_cap *dev_cap,
enum func_type type)
{
struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap;
fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx;
fc_cap->scq_num = dev_cap->fc_max_scq;
fc_cap->srq_num = dev_cap->fc_max_srq;
fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx;
fc_cap->vp_id_start = dev_cap->fc_vp_id_start;
fc_cap->vp_id_end = dev_cap->fc_vp_id_end;
pr_info("Get fc resource capbility\n");
pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x\n",
fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num,
fc_cap->max_child_qpc_num);
pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
fc_cap->vp_id_start, fc_cap->vp_id_end);
}
static void parse_dev_cap(struct hifc_hwdev *dev,
struct hifc_dev_cap *dev_cap, enum func_type type)
{
struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
/* Public resource */
parse_pub_res_cap(cap, dev_cap, type);
/* PPF managed dynamic resource */
parse_fc_res_cap(cap, dev_cap, type);
}
static int get_cap_from_fw(struct hifc_hwdev *dev, enum func_type type)
{
struct hifc_dev_cap dev_cap = {0};
u16 out_len = sizeof(dev_cap);
int err;
dev_cap.version = HIFC_CMD_VER_FUNC_ID;
err = hifc_global_func_id_get(dev, &dev_cap.func_id);
if (err)
return err;
sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n",
dev_cap.func_id);
err = hifc_msg_to_mgmt_sync(dev, HIFC_MOD_CFGM, HIFC_CFG_NIC_CAP,
&dev_cap, sizeof(dev_cap),
&dev_cap, &out_len, 0);
if (err || dev_cap.status || !out_len) {
sdk_err(dev->dev_hdl,
"Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n",
err, dev_cap.status, out_len);
return -EFAULT;
}
parse_dev_cap(dev, &dev_cap, type);
return 0;
}
static void fc_param_fix(struct hifc_hwdev *dev)
{
struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
struct fc_service_cap *fc_cap = &cap->fc_cap;
fc_cap->parent_qpc_size = FC_PCTX_SZ;
fc_cap->child_qpc_size = FC_CCTX_SZ;
fc_cap->sqe_size = FC_SQE_SZ;
fc_cap->scqc_size = FC_SCQC_SZ;
fc_cap->scqe_size = FC_SCQE_SZ;
fc_cap->srqc_size = FC_SRQC_SZ;
fc_cap->srqe_size = FC_SRQE_SZ;
}
static void cfg_get_eq_num(struct hifc_hwdev *dev)
{
struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info;
eq_info->num_ceq = dev->hwif->attr.num_ceqs;
eq_info->num_ceq_remain = eq_info->num_ceq;
}
static int cfg_init_eq(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
struct cfg_eq *eq;
u8 num_ceq, i = 0;
cfg_get_eq_num(dev);
num_ceq = cfg_mgmt->eq_info.num_ceq;
sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n",
cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
if (!num_ceq) {
sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n");
return -EFAULT;
}
eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL);
if (!eq)
return -ENOMEM;
for (i = 0; i < num_ceq; ++i) {
eq[i].eqn = i;
eq[i].free = CFG_FREE;
eq[i].type = SERVICE_T_MAX;
}
cfg_mgmt->eq_info.eq = eq;
mutex_init(&cfg_mgmt->eq_info.eq_mutex);
return 0;
}
static int cfg_init_interrupt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info;
u16 intr_num = dev->hwif->attr.num_irqs;
if (!intr_num) {
sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n");
return -EFAULT;
}
irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info),
GFP_KERNEL);
if (!irq_info->alloc_info)
return -ENOMEM;
irq_info->num_irq_hw = intr_num;
cfg_mgmt->svc_cap.interrupt_type = intr_mode;
mutex_init(&irq_info->irq_mutex);
return 0;
}
static int cfg_enable_interrupt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw;
void *pcidev = dev->pcidev_hdl;
struct irq_alloc_info_st *irq_info;
struct msix_entry *entry;
u16 i = 0;
int actual_irq;
irq_info = cfg_mgmt->irq_param_info.alloc_info;
sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n",
cfg_mgmt->svc_cap.interrupt_type, nreq);
switch (cfg_mgmt->svc_cap.interrupt_type) {
case INTR_TYPE_MSIX:
if (!nreq) {
sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n");
return -EINVAL;
}
entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
for (i = 0; i < nreq; i++)
entry[i].entry = i;
actual_irq = pci_enable_msix_range(pcidev, entry,
VECTOR_THRESHOLD, nreq);
if (actual_irq < 0) {
sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n");
kfree(entry);
return -ENOMEM;
}
nreq = (u16)actual_irq;
cfg_mgmt->irq_param_info.num_total = nreq;
cfg_mgmt->irq_param_info.num_irq_remain = nreq;
sdk_info(dev->dev_hdl, "Request %d msix vector success.\n",
nreq);
for (i = 0; i < nreq; ++i) {
/* entry: the msix table index the driver requested (u16) */
irq_info[i].info.msix_entry_idx = entry[i].entry;
/* vector: the Linux IRQ number the kernel allocated (u32) */
irq_info[i].info.irq_id = entry[i].vector;
irq_info[i].type = SERVICE_T_MAX;
irq_info[i].free = CFG_FREE;
}
kfree(entry);
break;
default:
sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n",
cfg_mgmt->svc_cap.interrupt_type);
break;
}
return 0;
}
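/* Side note (added; not part of the original driver): on kernels that
 * provide pci_alloc_irq_vectors(), the manual msix_entry bookkeeping above
 * can be replaced by the sketch below; the Linux IRQ number for vector i
 * is then obtained with pci_irq_vector(pdev, i).
 */
#ifdef HIFC_CFG_EXAMPLE
static int hifc_example_enable_msix(struct pci_dev *pdev, u16 nreq)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, VECTOR_THRESHOLD, nreq,
				     PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;	/* could not meet the minimum threshold */

	return nvec;		/* number of vectors actually granted */
}
#endif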
int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 num,
struct irq_info *irq_info_array, u16 *act_num)
{
struct hifc_hwdev *dev = hwdev;
struct cfg_mgmt_info *cfg_mgmt;
struct cfg_irq_info *irq_info;
struct irq_alloc_info_st *alloc_info;
int max_num_irq;
u16 free_num_irq;
int i, j;
if (!hwdev || !irq_info_array || !act_num)
return -EINVAL;
cfg_mgmt = dev->cfg_mgmt;
irq_info = &cfg_mgmt->irq_param_info;
alloc_info = irq_info->alloc_info;
max_num_irq = irq_info->num_total;
free_num_irq = irq_info->num_irq_remain;
mutex_lock(&irq_info->irq_mutex);
if (num > free_num_irq) {
if (free_num_irq == 0) {
sdk_err(dev->dev_hdl,
"no free irq resource in cfg mgmt.\n");
mutex_unlock(&irq_info->irq_mutex);
return -ENOMEM;
}
sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n",
free_num_irq);
num = free_num_irq;
}
*act_num = 0;
for (i = 0; i < num; i++) {
for (j = 0; j < max_num_irq; j++) {
if (alloc_info[j].free == CFG_FREE) {
if (irq_info->num_irq_remain == 0) {
sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
mutex_unlock(&irq_info->irq_mutex);
return -EINVAL;
}
alloc_info[j].type = type;
alloc_info[j].free = CFG_BUSY;
irq_info_array[i].msix_entry_idx =
alloc_info[j].info.msix_entry_idx;
irq_info_array[i].irq_id =
alloc_info[j].info.irq_id;
(*act_num)++;
irq_info->num_irq_remain--;
break;
}
}
}
mutex_unlock(&irq_info->irq_mutex);
return 0;
}
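/* Illustrative caller sketch (added; not part of the original file): a
 * service requesting two vectors and tolerating partial allocation, per
 * the contract of hifc_alloc_irqs() above. SERVICE_T_FC is an assumed
 * member of enum hifc_service_type, which is defined elsewhere.
 */
#ifdef HIFC_CFG_EXAMPLE
static int hifc_example_get_vectors(void *hwdev)
{
	struct irq_info irqs[2];
	u16 got = 0;
	int err;

	err = hifc_alloc_irqs(hwdev, SERVICE_T_FC, 2, irqs, &got);
	if (err)
		return err;

	/* "got" may be smaller than requested when resources run short */
	return got;
}
#endif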
void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id)
{
struct hifc_hwdev *dev = hwdev;
struct cfg_mgmt_info *cfg_mgmt;
struct cfg_irq_info *irq_info;
struct irq_alloc_info_st *alloc_info;
int max_num_irq;
int i;
if (!hwdev)
return;
cfg_mgmt = dev->cfg_mgmt;
irq_info = &cfg_mgmt->irq_param_info;
alloc_info = irq_info->alloc_info;
max_num_irq = irq_info->num_total;
mutex_lock(&irq_info->irq_mutex);
for (i = 0; i < max_num_irq; i++) {
if (irq_id == alloc_info[i].info.irq_id &&
type == alloc_info[i].type) {
if (alloc_info[i].free == CFG_BUSY) {
alloc_info[i].free = CFG_FREE;
irq_info->num_irq_remain++;
if (irq_info->num_irq_remain > max_num_irq) {
sdk_err(dev->dev_hdl, "Find target,but over range\n");
mutex_unlock(&irq_info->irq_mutex);
return;
}
break;
}
}
}
if (i >= max_num_irq)
sdk_warn(dev->dev_hdl, "Irq %d don`t need to free\n", irq_id);
mutex_unlock(&irq_info->irq_mutex);
}
static int init_cfg_mgmt(struct hifc_hwdev *dev)
{
int err;
struct cfg_mgmt_info *cfg_mgmt;
cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
if (!cfg_mgmt)
return -ENOMEM;
dev->cfg_mgmt = cfg_mgmt;
cfg_mgmt->hwdev = dev;
err = cfg_init_eq(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n",
err);
goto free_mgmt_mem;
}
err = cfg_init_interrupt(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n",
err);
goto free_eq_mem;
}
err = cfg_enable_interrupt(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n",
err);
goto free_interrupt_mem;
}
return 0;
free_interrupt_mem:
kfree(cfg_mgmt->irq_param_info.alloc_info);
cfg_mgmt->irq_param_info.alloc_info = NULL;
free_eq_mem:
kfree(cfg_mgmt->eq_info.eq);
cfg_mgmt->eq_info.eq = NULL;
free_mgmt_mem:
kfree(cfg_mgmt);
return err;
}
static void free_cfg_mgmt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
/* check whether all allocated resources have been recycled */
if (cfg_mgmt->irq_param_info.num_irq_remain !=
cfg_mgmt->irq_param_info.num_total ||
cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq)
sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n");
switch (cfg_mgmt->svc_cap.interrupt_type) {
case INTR_TYPE_MSIX:
pci_disable_msix(dev->pcidev_hdl);
break;
case INTR_TYPE_MSI:
pci_disable_msi(dev->pcidev_hdl);
break;
case INTR_TYPE_INT:
default:
break;
}
kfree(cfg_mgmt->irq_param_info.alloc_info);
cfg_mgmt->irq_param_info.alloc_info = NULL;
kfree(cfg_mgmt->eq_info.eq);
cfg_mgmt->eq_info.eq = NULL;
kfree(cfg_mgmt);
}
static int init_capability(struct hifc_hwdev *dev)
{
int err;
enum func_type type = HIFC_FUNC_TYPE(dev);
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
cfg_mgmt->svc_cap.timer_en = 1;
cfg_mgmt->svc_cap.test_xid_alloc_mode = 1;
cfg_mgmt->svc_cap.test_gpa_check_enable = 1;
err = get_cap_from_fw(dev, type);
if (err) {
sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n");
return err;
}
fc_param_fix(dev);
if (dev->cfg_mgmt->svc_cap.force_up)
dev->feature_cap |= HIFC_FUNC_FORCE_LINK_UP;
sdk_info(dev->dev_hdl, "Init capability success\n");
return 0;
}
static void free_capability(struct hifc_hwdev *dev)
{
sdk_info(dev->dev_hdl, "Free capability success");
}
bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap)
{
struct hifc_hwdev *dev = hwdev;
if (!hwdev)
return false;
if (cap)
memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap));
return true;
}
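/* Illustrative caller sketch (added; not part of the original file):
 * querying the FC capability before sizing queues; the fields printed are
 * the ones filled in by parse_fc_res_cap() above.
 */
#ifdef HIFC_CFG_EXAMPLE
static void hifc_example_dump_fc_cap(void *hwdev)
{
	struct fc_service_cap cap;

	if (!hifc_support_fc(hwdev, &cap))
		return;

	pr_info("hifc example: parent qpc num 0x%x, scq num 0x%x\n",
		cap.dev_fc_cap.max_parent_qpc_num, cap.dev_fc_cap.scq_num);
}
#endif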
u8 hifc_host_oq_id_mask(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting host oq id mask\n");
return 0;
}
return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val;
}
u16 hifc_func_max_qnum(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting function max queue number\n");
return 0;
}
return dev->cfg_mgmt->svc_cap.max_sqs;
}
/* Caller should ensure atomicity when calling this function */
int hifc_stateful_init(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
int err;
if (!dev)
return -EINVAL;
if (dev->statufull_ref_cnt++)
return 0;
err = cqm_init(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err);
goto init_cqm_err;
}
sdk_info(dev->dev_hdl, "Initialize statefull resource success\n");
return 0;
init_cqm_err:
dev->statufull_ref_cnt--;
return err;
}
/* Caller should ensure atomicity when calling this function */
void hifc_stateful_deinit(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev || !dev->statufull_ref_cnt)
return;
if (--dev->statufull_ref_cnt)
return;
cqm_uninit(hwdev);
sdk_info(dev->dev_hdl, "Clear statefull resource success\n");
}
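/* Illustrative pairing (added; not part of the original file): the
 * comments above make the caller responsible for serializing stateful
 * init/deinit, e.g. under a driver-level mutex in probe/remove paths.
 */
#ifdef HIFC_CFG_EXAMPLE
static DEFINE_MUTEX(hifc_example_stateful_lock);

static int hifc_example_attach(void *hwdev)
{
	int err;

	mutex_lock(&hifc_example_stateful_lock);
	err = hifc_stateful_init(hwdev);
	mutex_unlock(&hifc_example_stateful_lock);

	return err;
}

static void hifc_example_detach(void *hwdev)
{
	mutex_lock(&hifc_example_stateful_lock);
	hifc_stateful_deinit(hwdev);
	mutex_unlock(&hifc_example_stateful_lock);
}
#endif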
bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state)
{
struct hifc_hwdev *dev = hwdev;
if (!hwdev || state >= HIFC_HWDEV_MAX_INVAL_INITED)
return false;
return !!test_bit(state, &dev->func_state);
}
static int hifc_os_dep_init(struct hifc_hwdev *hwdev)
{
hwdev->workq = create_singlethread_workqueue(HIFC_HW_WQ_NAME);
if (!hwdev->workq) {
sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n");
return -EFAULT;
}
sema_init(&hwdev->fault_list_sem, 1);
return 0;
}
static void hifc_os_dep_deinit(struct hifc_hwdev *hwdev)
{
destroy_workqueue(hwdev->workq);
}
static int __hilink_phy_init(struct hifc_hwdev *hwdev)
{
int err;
err = hifc_phy_init_status_judge(hwdev);
if (err) {
sdk_info(hwdev->dev_hdl, "Phy init failed\n");
return err;
}
return 0;
}
static int init_hwdev_and_hwif(struct hifc_init_para *para)
{
struct hifc_hwdev *hwdev;
int err;
if (!(*para->hwdev)) {
hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
if (!hwdev)
return -ENOMEM;
*para->hwdev = hwdev;
hwdev->adapter_hdl = para->adapter_hdl;
hwdev->pcidev_hdl = para->pcidev_hdl;
hwdev->dev_hdl = para->dev_hdl;
hwdev->chip_node = para->chip_node;
hwdev->chip_fault_stats = vzalloc(HIFC_CHIP_FAULT_SIZE);
if (!hwdev->chip_fault_stats)
goto alloc_chip_fault_stats_err;
err = hifc_init_hwif(hwdev, para->cfg_reg_base,
para->intr_reg_base,
para->db_base_phy, para->db_base,
para->dwqe_mapping);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
goto init_hwif_err;
}
}
return 0;
init_hwif_err:
vfree(hwdev->chip_fault_stats);
alloc_chip_fault_stats_err:
*para->hwdev = NULL;
return -EFAULT;
}
static void deinit_hwdev_and_hwif(struct hifc_hwdev *hwdev)
{
hifc_free_hwif(hwdev);
vfree(hwdev->chip_fault_stats);
kfree(hwdev);
}
static int init_hw_cfg(struct hifc_hwdev *hwdev)
{
int err;
err = init_capability(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init capability\n");
return err;
}
err = __hilink_phy_init(hwdev);
if (err)
goto hilink_phy_init_err;
return 0;
hilink_phy_init_err:
free_capability(hwdev);
return err;
}
/* Return:
* 0: all success
* >0: partial success
* <0: all failed
*/
int hifc_init_hwdev(struct hifc_init_para *para)
{
struct hifc_hwdev *hwdev;
int err;
err = init_hwdev_and_hwif(para);
if (err)
return err;
hwdev = *para->hwdev;
/* detect slave host according to BAR reg */
hwdev->feature_cap = HIFC_FUNC_MGMT | HIFC_FUNC_PORT |
HIFC_FUNC_SUPP_RATE_LIMIT | HIFC_FUNC_SUPP_DFX_REG |
HIFC_FUNC_SUPP_RX_MODE | HIFC_FUNC_SUPP_SET_VF_MAC_VLAN |
HIFC_FUNC_SUPP_CHANGE_MAC;
err = hifc_os_dep_init(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
goto os_dep_init_err;
}
hifc_set_chip_present(hwdev);
hifc_init_heartbeat(hwdev);
err = init_cfg_mgmt(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n");
goto init_cfg_mgmt_err;
}
err = hifc_init_comm_ch(hwdev);
if (err) {
if (!(hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK)) {
sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n");
goto init_comm_ch_err;
} else {
sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n");
return hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK;
}
}
err = init_hw_cfg(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init hardware config\n");
goto init_hw_cfg_err;
}
set_bit(HIFC_HWDEV_ALL_INITED, &hwdev->func_state);
sdk_info(hwdev->dev_hdl, "Init hwdev success\n");
return 0;
init_hw_cfg_err:
return (hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK);
init_comm_ch_err:
free_cfg_mgmt(hwdev);
init_cfg_mgmt_err:
hifc_destroy_heartbeat(hwdev);
hifc_os_dep_deinit(hwdev);
os_dep_init_err:
deinit_hwdev_and_hwif(hwdev);
*para->hwdev = NULL;
return -EFAULT;
}
void hifc_free_hwdev(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
enum hifc_hwdev_init_state state = HIFC_HWDEV_ALL_INITED;
int flag = 0;
if (!hwdev)
return;
if (test_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state)) {
clear_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state);
/* BM slave functions do not need to exec rx_tx_flush */
hifc_func_rx_tx_flush(hwdev);
free_capability(dev);
}
while (state > HIFC_HWDEV_NONE_INITED) {
if (test_bit(state, &dev->func_state)) {
flag = 1;
break;
}
state--;
}
if (flag) {
hifc_uninit_comm_ch(dev);
free_cfg_mgmt(dev);
hifc_destroy_heartbeat(dev);
hifc_os_dep_deinit(dev);
}
clear_bit(HIFC_HWDEV_NONE_INITED, &dev->func_state);
deinit_hwdev_and_hwif(dev);
}
u64 hifc_get_func_feature_cap(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting function feature capability\n");
return 0;
}
return dev->feature_cap;
}

hifc/hifc_cfg.h (new file, 171 lines)

@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __CFG_MGT_H__
#define __CFG_MGT_H__
enum {
CFG_FREE = 0,
CFG_BUSY = 1
};
/* FC */
#define FC_PCTX_SZ 256
#define FC_CCTX_SZ 256
#define FC_SQE_SZ 128
#define FC_SCQC_SZ 64
#define FC_SCQE_SZ 64
#define FC_SRQC_SZ 64
#define FC_SRQE_SZ 32
/* device capability */
struct service_cap {
/* Host global resources */
u16 host_total_function;
u8 host_oq_id_mask_val;
/* DO NOT get interrupt_type from firmware */
enum intr_type interrupt_type;
u8 intr_chip_en;
u8 port_id; /* PF/VF's physical port */
u8 force_up;
u8 timer_en; /* 0:disable, 1:enable */
u16 max_sqs;
u16 max_rqs;
/* For test */
bool test_xid_alloc_mode;
bool test_gpa_check_enable;
u32 max_connect_num; /* PF/VF maximum connection number (up to 1M) */
/* The maximum connections which can be stick to cache memory, max 1K */
u16 max_stick2cache_num;
struct nic_service_cap nic_cap; /* NIC capability */
struct fc_service_cap fc_cap; /* FC capability */
};
struct hifc_sync_time_info {
u8 status;
u8 version;
u8 rsvd0[6];
u64 mstime;
};
struct cfg_eq {
enum hifc_service_type type;
int eqn;
int free; /* 1 - allocated, 0 - freed */
};
struct cfg_eq_info {
struct cfg_eq *eq;
u8 num_ceq;
u8 num_ceq_remain;
/* mutex used for allocating EQs */
struct mutex eq_mutex;
};
struct irq_alloc_info_st {
enum hifc_service_type type;
int free; /* 1 - allocated, 0 - freed */
struct irq_info info;
};
struct cfg_irq_info {
struct irq_alloc_info_st *alloc_info;
u16 num_total;
u16 num_irq_remain;
u16 num_irq_hw; /* device max irq number */
/* mutex used for allocating IRQs */
struct mutex irq_mutex;
};
#define VECTOR_THRESHOLD 2
struct cfg_mgmt_info {
struct hifc_hwdev *hwdev;
struct service_cap svc_cap;
struct cfg_eq_info eq_info; /* EQ */
struct cfg_irq_info irq_param_info; /* IRQ */
u32 func_seq_num; /* temporary */
};
enum cfg_sub_cmd {
/* PPF(PF) <-> FW */
HIFC_CFG_NIC_CAP = 0,
CFG_FW_VERSION,
CFG_UCODE_VERSION,
HIFC_CFG_FUNC_CAP,
HIFC_CFG_MBOX_CAP = 6,
};
struct hifc_dev_cap {
u8 status;
u8 version;
u8 rsvd0[6];
/* Public resource */
u8 sf_svc_attr;
u8 host_id;
u8 sf_en_pf;
u8 sf_en_vf;
u8 ep_id;
u8 intr_type;
u8 max_cos_id;
u8 er_id;
u8 port_id;
u8 max_vf;
u16 svc_cap_en;
u16 host_total_func;
u8 host_oq_id_mask_val;
u8 max_vf_cos_id;
u32 max_conn_num;
u16 max_stick2cache_num;
u16 max_bfilter_start_addr;
u16 bfilter_len;
u16 hash_bucket_num;
u8 cfg_file_ver;
u8 net_port_mode;
u8 valid_cos_bitmap; /* every bit indicate cos is valid */
u8 force_up;
u32 pf_num;
u32 pf_id_start;
u32 vf_num;
u32 vf_id_start;
/* shared resource */
u32 host_pctx_num;
u8 host_sf_en;
u8 rsvd2[3];
u32 host_ccxt_num;
u32 host_scq_num;
u32 host_srq_num;
u32 host_mpt_num;
/* l2nic */
u16 nic_max_sq;
u16 nic_max_rq;
u32 rsvd[46];
/* FC */
u32 fc_max_pctx;
u32 fc_max_scq;
u32 fc_max_srq;
u32 fc_max_cctx;
u32 fc_cctx_id_start;
u8 fc_vp_id_start;
u8 fc_vp_id_end;
u16 func_id;
};
#endif

hifc/hifc_chipitf.c (new file, 2095 lines)

Diff suppressed (file too large).

hifc/hifc_chipitf.h (new file, 643 lines)

@@ -0,0 +1,643 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_CHIPITF_H__
#define __HIFC_CHIPITF_H__
#include "unf_log.h"
#include "hifc_utils.h"
#include "hifc_module.h"
#include "hifc_service.h"
/* CONF_API_CMND */
#define HIFC_MBOX_CONFIG_API 0x00
#define HIFC_MBOX_CONFIG_API_STS 0xA0
/* GET_CHIP_INFO_API_CMD */
#define HIFC_MBOX_GET_CHIP_INFO 0x01
#define HIFC_MBOX_GET_CHIP_INFO_STS 0xA1
/* PORT_RESET */
#define HIFC_MBOX_PORT_RESET 0x02
#define HIFC_MBOX_PORT_RESET_STS 0xA2
/* SFP_SWITCH_API_CMND */
#define HIFC_MBOX_PORT_SWITCH 0x03
#define HIFC_MBOX_PORT_SWITCH_STS 0xA3
/* GET_SFP_INFO */
#define HIFC_MBOX_GET_SFP_INFO 0x04
#define HIFC_MBOX_GET_SFP_INFO_STS 0xA4
/* CONF_AF_LOGIN_API_CMND */
#define HIFC_MBOX_CONFIG_LOGIN_API 0x06
#define HIFC_MBOX_CONFIG_LOGIN_API_STS 0xA6
/* BUFFER_CLEAR_DONE_CMND */
#define HIFC_MBOX_BUFFER_CLEAR_DONE 0x07
#define HIFC_MBOX_BUFFER_CLEAR_DONE_STS 0xA7
#define HIFC_MBOX_GET_ERR_CODE 0x08
#define HIFC_MBOX_GET_ERR_CODE_STS 0xA8
#define HIFC_MBOX_GET_UP_STATE 0x09
#define HIFC_MBOX_GET_UP_STATE_STS 0xA9
/* LOOPBACK MODE */
#define HIFC_MBOX_LOOPBACK_MODE 0x0A
#define HIFC_MBOX_LOOPBACK_MODE_STS 0xAA
/* REG RW MODE */
#define HIFC_MBOX_REG_RW_MODE 0x0B
#define HIFC_MBOX_REG_RW_MODE_STS 0xAB
/* GET CLEAR DONE STATE */
#define HIFC_MBOX_GET_CLEAR_STATE 0x0E
#define HIFC_MBOX_GET_CLEAR_STATE_STS 0xAE
/* GET UP & UCODE VER */
#define HIFC_MBOX_GET_FW_VERSION 0x0F
#define HIFC_MBOX_GET_FW_VERSION_STS 0xAF
/* CONFIG TIMER */
#define HIFC_MBOX_CONFIG_TIMER 0x10
#define HIFC_MBOX_CONFIG_TIMER_STS 0xB0
/* CONFIG SRQC */
#define HIFC_MBOX_CONFIG_SRQC 0x11
#define HIFC_MBOX_CONFIG_SRQC_STS 0xB1
/* Led Test */
#define HIFC_MBOX_LED_TEST 0x12
#define HIFC_MBOX_LED_TEST_STS 0xB2
/* set esch */
#define HIFC_MBOX_SET_ESCH 0x13
#define HIFC_MBOX_SET_ESCH_STS 0xB3
/* set get tx serdes */
#define HIFC_MBOX_SET_GET_SERDES_TX 0x14
#define HIFC_MBOX_SET_GET_SERDES_TX_STS 0xB4
/* get rx serdes */
#define HIFC_MBOX_GET_SERDES_RX 0x15
#define HIFC_MBOX_GET_SERDES_RX_STS 0xB5
/* i2c read write */
#define HIFC_MBOX_I2C_WR_RD 0x16
#define HIFC_MBOX_I2C_WR_RD_STS 0xB6
/* Set FEC Enable */
#define HIFC_MBOX_CONFIG_FEC 0x17
#define HIFC_MBOX_CONFIG_FEC_STS 0xB7
/* GET UCODE STATS CMD */
#define HIFC_MBOX_GET_UCODE_STAT 0x18
#define HIFC_MBOX_GET_UCODE_STAT_STS 0xB8
/* gpio read write */
#define HIFC_MBOX_GPIO_WR_RD 0x19
#define HIFC_MBOX_GPIO_WR_RD_STS 0xB9
/* GET PORT INFO CMD */
#define HIFC_MBOX_GET_PORT_INFO 0x20
#define HIFC_MBOX_GET_PORT_INFO_STS 0xC0
/* save hba info CMD */
#define HIFC_MBOX_SAVE_HBA_INFO 0x24
#define HIFC_MBOX_SAVE_HBA_INFO_STS 0xc4
#define HIFC_MBOX_FLASH_DATA_MGMT 0x25
#define HIFC_MBOX_FLASH_DATA_MGMT_STS 0xc5
/* FCOE: DRV->UP */
#define HIFC_MBOX_SEND_ELS_CMD 0x2A
#define HIFC_MBOX_SEND_VPORT_INFO 0x2B
/* FC: UP->DRV */
#define HIFC_MBOX_RECV_FC_LINKUP 0x40
#define HIFC_MBOX_RECV_FC_LINKDOWN 0x41
#define HIFC_MBOX_RECV_FC_DELCMD 0x42
#define HIFC_MBOX_RECV_FC_ERROR 0x43
#define LOOP_MAP_VALID 1
#define LOOP_MAP_INVALID 0
#define HIFC_MBOX_SIZE 1024
#define HIFC_MBOX_HEADER_SIZE 4
#define ATUOSPEED 1
#define FIXEDSPEED 0
#define UNDEFINEOPCODE 0
#define VALUEMASK_L 0x00000000FFFFFFFF
#define VALUEMASK_H 0xFFFFFFFF00000000
#define STATUS_OK 0
#define STATUS_FAIL 1
enum hifc_drv_2_up_unblock_msg_cmd_code_e {
HIFC_SEND_ELS_CMD,
HIFC_SEND_ELS_CMD_FAIL,
HIFC_RCV_ELS_CMD_RSP,
HIFC_SEND_CONFIG_LOGINAPI,
HIFC_SEND_CONFIG_LOGINAPI_FAIL,
HIFC_RCV_CONFIG_LOGIN_API_RSP,
HIFC_SEND_CLEAR_DONE,
HIFC_SEND_CLEAR_DONE_FAIL,
HIFC_RCV_CLEAR_DONE_RSP,
HIFC_SEND_VPORT_INFO_DONE,
HIFC_SEND_VPORT_INFO_FAIL,
HIFC_SEND_VPORT_INFO_RSP,
HIFC_MBOX_CMD_BUTT
};
/* uP-to-driver message handler template */
struct hifc_up_2_drv_msg_handle_s {
unsigned char cmd;
unsigned int (*pfn_hifc_msg_up2drv_handler)(struct hifc_hba_s *v_hba,
void *v_buf_in);
};
/* Mbox Common Header */
struct hifc_mbox_header_s {
unsigned char cmnd_type;
unsigned char length;
unsigned char port_id;
unsigned char reserved;
};
/* open or close the sfp */
struct hifc_inbox_port_switch_s {
struct hifc_mbox_header_s header;
unsigned char op_code;
unsigned char port_type;
unsigned short reserved;
unsigned char host_id;
unsigned char pf_id;
unsigned char fcoe_mode;
unsigned char reserved2;
unsigned short conf_vlan;
unsigned short reserved3;
unsigned long long sys_port_wwn;
unsigned long long sys_node_name;
};
struct hifc_outbox_port_switch_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* config API */
struct hifc_inbox_config_api_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
unsigned char topy_mode;
unsigned char sfp_speed;
unsigned char max_speed;
unsigned char hard_alpa;
unsigned char port_name[UNF_WWN_LEN];
unsigned int slave : 1;
unsigned int auto_sneg : 1;
unsigned int reserved2 : 30;
unsigned int rx_bbcredit_32g : 16; /* 160 */
unsigned int rx_bbcredit_16g : 16; /* 80 */
unsigned int rx_bbcredit_842g : 16; /* 50 */
unsigned int rdy_cnt_bf_fst_frm : 16; /* 8 */
unsigned int esch_value_32g;
unsigned int esch_value_16g;
unsigned int esch_value_8g;
unsigned int esch_value_4g;
unsigned int esch_value_2g;
unsigned int esch_bust_size;
};
struct hifc_outbox_config_api_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* Get chip info */
struct hifc_inbox_get_chip_info_s {
struct hifc_mbox_header_s header;
};
struct hifc_outbox_get_chip_info_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char board_type;
unsigned char rvsd;
unsigned char tape_support : 1;
unsigned char reserved : 7;
unsigned long long wwpn;
unsigned long long wwnn;
unsigned long long sys_mac;
};
/* Get reg info */
struct hifc_inmbox_get_reg_info_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 1;
unsigned int reg_len : 8;
unsigned int rsvd : 23;
unsigned int reg_addr;
unsigned int reg_value_l32;
unsigned int reg_value_h32;
unsigned int rvsd[27];
};
/* Get reg info sts */
struct hifc_outmbox_get_reg_info_sts_s {
struct hifc_mbox_header_s header;
unsigned short rvsd0;
unsigned char rvsd1;
unsigned char status;
unsigned int reg_value_l32;
unsigned int reg_value_h32;
unsigned int rvsd[28];
};
/* Config login API */
struct hifc_inmbox_config_login_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
unsigned short tx_bb_credit;
unsigned short reserved2;
unsigned int rtov;
unsigned int etov;
unsigned int rt_tov_tag : 1;
unsigned int ed_tov_tag : 1;
unsigned int bb_credit : 6;
unsigned int bbscn : 8;
unsigned int lr_flag : 16;
};
struct hifc_outmbox_config_login_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* port reset */
#define HIFC_MBOX_SUBTYPE_LIGHT_RESET 0x0
#define HIFC_MBOX_SUBTYPE_HEAVY_RESET 0x1
struct hifc_inmbox_port_reset_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
};
struct hifc_outmbox_port_reset_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
struct hifc_inmbox_get_sfp_info_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_sfp_info_sts_s {
struct hifc_mbox_header_s header;
unsigned int rcvd : 8;
unsigned int length : 16;
unsigned int status : 8;
};
/* get and clear error code */
struct hifc_inmbox_get_err_code_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_err_code_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd;
unsigned char rsvd2;
unsigned char status;
unsigned int err_code[8];
};
/* uP-->Driver async event API */
struct hifc_link_event_s {
struct hifc_mbox_header_s header;
unsigned char link_event;
unsigned char reason;
unsigned char speed;
unsigned char top_type;
unsigned char alpa_value;
unsigned char reserved1;
unsigned short paticpate : 1;
unsigned short acled : 1;
unsigned short yellow_speed_led : 1;
unsigned short green_speed_led : 1;
unsigned short reserved : 12;
unsigned char loop_map_info[128];
};
enum hifc_up_err_type_e {
HIFC_UP_ERR_DRV_PARA = 0,
HIFC_UP_ERR_SFP = 1,
HIFC_UP_ERR_32G_PUB = 2,
HIFC_UP_ERR_32G_UA = 3,
HIFC_UP_ERR_32G_MAC = 4,
HIFC_UP_ERR_NON32G_DFX = 5,
HIFC_UP_ERR_NON32G_MAC = 6,
HIFC_UP_ERR_BUTT
};
enum hifc_up_err_value_e {
/* ERR type 0 */
HIFC_DRV_2_UP_PARA_ERR = 0,
/* ERR type 1 */
HIFC_SFP_SPEED_ERR,
/* ERR type 2 */
HIFC_32GPUB_UA_RXESCH_FIFO_OF,
HIFC_32GPUB_UA_RXESCH_FIFO_UCERR,
/* ERR type 3 */
HIFC_32G_UA_UATX_LEN_ABN,
HIFC_32G_UA_RXAFIFO_OF,
HIFC_32G_UA_TXAFIFO_OF,
HIFC_32G_UA_RXAFIFO_UCERR,
HIFC_32G_UA_TXAFIFO_UCERR,
/* ERR type 4 */
HIFC_32G_MAC_RX_BBC_FATAL,
HIFC_32G_MAC_TX_BBC_FATAL,
HIFC_32G_MAC_TXFIFO_UF,
HIFC_32G_MAC_PCS_TXFIFO_UF,
HIFC_32G_MAC_RXBBC_CRDT_TO,
HIFC_32G_MAC_PCS_RXAFIFO_OF,
HIFC_32G_MAC_PCS_TXFIFO_OF,
HIFC_32G_MAC_FC2P_RXFIFO_OF,
HIFC_32G_MAC_FC2P_TXFIFO_OF,
HIFC_32G_MAC_FC2P_CAFIFO_OF,
HIFC_32G_MAC_PCS_RXRSFECM_UCEER,
HIFC_32G_MAC_PCS_RXAFIFO_UCEER,
HIFC_32G_MAC_PCS_TXFIFO_UCEER,
HIFC_32G_MAC_FC2P_RXFIFO_UCEER,
HIFC_32G_MAC_FC2P_TXFIFO_UCEER,
/* ERR type 5 */
HIFC_NON32G_DFX_FC1_DFX_BF_FIFO,
HIFC_NON32G_DFX_FC1_DFX_BP_FIFO,
HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR,
HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO,
HIFC_NON32G_DFX_FC1_ERR_R_RDY,
/* ERR type 6 */
HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR,
HIFC_ERR_VALUE_BUTT
};
struct hifc_up_error_event_s {
struct hifc_mbox_header_s header;
unsigned char link_event;
unsigned char error_level;
unsigned char error_type;
unsigned char error_value;
};
struct hifc_inmbx_clear_node_s {
struct hifc_mbox_header_s header;
};
struct hifc_inmbox_get_clear_state_s {
struct hifc_mbox_header_s header;
unsigned int resvd[31];
};
struct hifc_outmbox_get_clear_state_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd;
unsigned char state; /* 1 - clearing in progress, 0 - clear done */
unsigned char status; /* 0 - ok, non-zero - fail */
unsigned int resvd[30];
};
#define HIFC_FIP_MODE_VN2VF 0
#define HIFC_FIP_MODE_VN2VN 1
/* get port state */
struct hifc_inmbox_get_port_info_s {
struct hifc_mbox_header_s header;
};
/* save hba info */
struct hifc_inmbox_save_hba_info_s {
struct hifc_mbox_header_s header;
unsigned int hba_save_info[254];
};
struct hifc_outmbox_get_port_info_sts_s {
struct hifc_mbox_header_s header;
unsigned int status : 8;
unsigned int fec_vis_tts_16g : 8;
unsigned int bbscn : 8;
unsigned int loop_credit : 8;
unsigned int non_loop_rx_credit : 8;
unsigned int non_loop_tx_credit : 8;
unsigned int sfp_speed : 8;
unsigned int present : 8;
};
struct hifc_outmbox_save_hba_info_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd1;
unsigned char rsvd2;
unsigned char status;
unsigned int rsvd3;
unsigned int save_hba_info[252];
};
#define HIFC_VER_ADDR_OFFSET (8)
struct hifc_inmbox_get_fw_version_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_fw_version_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char rsv[3];
unsigned char ucode_ver[HIFC_VER_LEN];
unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char up_ver[HIFC_VER_LEN];
unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char boot_ver[HIFC_VER_LEN];
unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN];
};
/* Set Fec Enable */
struct hifc_inmbox_config_fec_s {
struct hifc_mbox_header_s header;
unsigned char fec_op_code;
unsigned char rsv0;
unsigned short rsv1;
};
struct hifc_outmbox_config_fec_sts_s {
struct hifc_mbox_header_s header;
unsigned short usrsv0;
unsigned char ucrsv1;
unsigned char status;
};
struct hifc_inmbox_config_timer_s {
struct hifc_mbox_header_s header;
unsigned short op_code;
unsigned short fun_id;
unsigned int user_data;
};
struct hifc_outmbox_config_timer_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char rsv[3];
};
union hifc_outmbox_generic_u {
struct {
struct hifc_mbox_header_s header;
unsigned int rsvd[(HIFC_MBOX_SIZE - HIFC_MBOX_HEADER_SIZE) /
sizeof(unsigned int)];
} generic;
struct hifc_outbox_port_switch_sts_s port_switch_sts;
struct hifc_outbox_config_api_sts_s config_api_sts;
struct hifc_outbox_get_chip_info_sts_s get_chip_info_sts;
struct hifc_outmbox_get_reg_info_sts_s get_reg_info_sts;
struct hifc_outmbox_config_login_sts_s config_login_sts;
struct hifc_outmbox_port_reset_sts_s port_reset_sts;
struct hifc_outmbox_get_sfp_info_sts_s get_sfp_info_sts;
struct hifc_outmbox_get_err_code_sts_s get_err_code_sts;
struct hifc_outmbox_get_clear_state_sts_s get_clr_state_sts;
struct hifc_outmbox_get_fw_version_sts_s get_fw_ver_sts;
struct hifc_outmbox_config_fec_sts_s config_fec_sts;
struct hifc_outmbox_config_timer_sts_s timer_config_sts;
struct hifc_outmbox_get_port_info_sts_s get_port_info_sts;
struct unf_flash_data_mgmt_sts_s flash_data_sts;
};
unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac);
unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba);
unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on);
unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act);
unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg);
unsigned int hifc_get_loop_map(void *v_hba, void *v_buf);
unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bb_credit);
unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn);
unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code);
unsigned int hifc_get_port_current_info(void *v_hba, void *v_port_info);
unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out);
unsigned int hifc_get_software_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_port_info(void *v_hba);
unsigned int hifc_rw_reg(void *v_hba, void *v_params);
unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code);
unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info);
unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state);
unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa);
unsigned int hifc_get_topo_act(void *v_hba, void *v_topo_act);
unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg);
unsigned int hifc_config_login_api(
struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_parms);
unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba,
const void *v_in_mbox,
unsigned short in_size,
union hifc_outmbox_generic_u
*v_out_mbox);
void hifc_up_msg_2_driver_proc(void *v_hwdev_handle,
void *v_pri_handle,
unsigned char v_cmd,
void *v_buf_in,
unsigned short v_in_size,
void *v_buf_out,
unsigned short *v_out_size);
unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba,
unsigned char v_sub_type);
unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba);
unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in);
unsigned int hifc_update_port_param(void *v_hba, void *v_para_in);
unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba,
unsigned int *v_clear_state);
unsigned short hifc_get_global_base_qpn(void *v_handle);
unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba,
unsigned int v_fec_opcode);
unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba,
int v_opcode,
unsigned int v_user_data);
unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in);
unsigned int hifc_get_chip_capability(void *hw_dev_handle,
struct hifc_chip_info_s *v_chip_info);
unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data);
unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data);
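/* Illustrative caller sketch (added; not part of the original header):
 * issuing a light port reset and checking the status reply, using the
 * request/reply structures and opcodes defined above. The "length counts
 * 32-bit words" and "0 means success" conventions are assumptions.
 */
#ifdef HIFC_CHIPITF_EXAMPLE
static unsigned int hifc_example_port_reset(struct hifc_hba_s *v_hba)
{
	struct hifc_inmbox_port_reset_s req;
	union hifc_outmbox_generic_u rsp;

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));
	req.header.cmnd_type = HIFC_MBOX_PORT_RESET;
	req.header.length = sizeof(req) / sizeof(unsigned int);
	req.op_code = HIFC_MBOX_SUBTYPE_LIGHT_RESET;

	if (hifc_mb_send_and_wait_mbox(v_hba, &req,
				       (unsigned short)sizeof(req), &rsp))
		return STATUS_FAIL;

	return (rsp.port_reset_sts.status == STATUS_OK) ?
	       STATUS_OK : STATUS_FAIL;
}
#endif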
#endif

hifc/hifc_cmdq.c (new file, 1507 lines)

Diff suppressed (file too large).

hifc/hifc_cmdq.h (new file, 210 lines)

@@ -0,0 +1,210 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_CMDQ_H_
#define HIFC_CMDQ_H_
#define HIFC_DB_OFF 0x00000800
#define HIFC_SCMD_DATA_LEN 16
#define HIFC_CMDQ_DEPTH 4096
#define HIFC_CMDQ_BUF_SIZE 2048U
#define HIFC_CMDQ_BUF_HW_RSVD 8
#define HIFC_CMDQ_MAX_DATA_SIZE \
(HIFC_CMDQ_BUF_SIZE - HIFC_CMDQ_BUF_HW_RSVD)
#define WQ_PAGE_PFN_SHIFT 12
#define WQ_BLOCK_PFN_SHIFT 9
#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
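/* Note (added for clarity): the shifts above convert DMA addresses into
 * frame numbers: WQ pages are 4 KiB (1 << 12), while the WQ block PFN
 * apparently uses a 512-byte (1 << 9) granularity expected by the
 * hardware context format.
 */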
enum hifc_cmdq_type {
HIFC_CMDQ_SYNC,
HIFC_CMDQ_ASYNC,
HIFC_MAX_CMDQ_TYPES,
};
enum hifc_db_src_type {
HIFC_DB_SRC_CMDQ_TYPE,
HIFC_DB_SRC_L2NIC_SQ_TYPE,
};
enum hifc_cmdq_db_type {
HIFC_DB_SQ_RQ_TYPE,
HIFC_DB_CMDQ_TYPE,
};
/* CMDQ WQE CTRLS */
struct hifc_cmdq_header {
u32 header_info;
u32 saved_data;
};
struct hifc_scmd_bufdesc {
u32 buf_len;
u32 rsvd;
u8 data[HIFC_SCMD_DATA_LEN];
};
struct hifc_lcmd_bufdesc {
struct hifc_sge sge;
u32 rsvd1;
u64 saved_async_buf;
u64 rsvd3;
};
struct hifc_cmdq_db {
u32 db_info;
u32 rsvd;
};
struct hifc_status {
u32 status_info;
};
struct hifc_ctrl {
u32 ctrl_info;
};
struct hifc_sge_resp {
struct hifc_sge sge;
u32 rsvd;
};
struct hifc_cmdq_completion {
/* HW Format */
union {
struct hifc_sge_resp sge_resp;
u64 direct_resp;
};
};
struct hifc_cmdq_wqe_scmd {
struct hifc_cmdq_header header;
struct hifc_cmdq_db db;
struct hifc_status status;
struct hifc_ctrl ctrl;
struct hifc_cmdq_completion completion;
struct hifc_scmd_bufdesc buf_desc;
};
struct hifc_cmdq_wqe_lcmd {
struct hifc_cmdq_header header;
struct hifc_status status;
struct hifc_ctrl ctrl;
struct hifc_cmdq_completion completion;
struct hifc_lcmd_bufdesc buf_desc;
};
struct hifc_cmdq_inline_wqe {
struct hifc_cmdq_wqe_scmd wqe_scmd;
};
struct hifc_cmdq_wqe {
/* HW Format */
union {
struct hifc_cmdq_inline_wqe inline_wqe;
struct hifc_cmdq_wqe_lcmd wqe_lcmd;
};
};
struct hifc_cmdq_arm_bit {
u32 q_type;
u32 q_id;
};
struct hifc_cmdq_ctxt_info {
u64 curr_wqe_page_pfn;
u64 wq_block_pfn;
};
struct hifc_cmdq_ctxt {
u8 status;
u8 version;
u8 rsvd0[6];
u16 func_idx;
u8 cmdq_id;
u8 ppf_idx;
u8 rsvd1[4];
struct hifc_cmdq_ctxt_info ctxt_info;
};
enum hifc_cmdq_status {
HIFC_CMDQ_ENABLE = BIT(0),
};
enum hifc_cmdq_cmd_type {
HIFC_CMD_TYPE_NONE,
HIFC_CMD_TYPE_SET_ARM,
HIFC_CMD_TYPE_DIRECT_RESP,
HIFC_CMD_TYPE_SGE_RESP,
HIFC_CMD_TYPE_ASYNC,
HIFC_CMD_TYPE_TIMEOUT,
HIFC_CMD_TYPE_FAKE_TIMEOUT,
};
struct hifc_cmdq_cmd_info {
enum hifc_cmdq_cmd_type cmd_type;
struct completion *done;
int *errcode;
int *cmpt_code;
u64 *direct_resp;
u64 cmdq_msg_id;
};
struct hifc_cmdq {
struct hifc_wq *wq;
enum hifc_cmdq_type cmdq_type;
int wrapped;
/* spinlock for send cmdq commands */
spinlock_t cmdq_lock;
/* doorbell area */
u8 __iomem *db_base;
struct hifc_cmdq_ctxt cmdq_ctxt;
struct hifc_cmdq_cmd_info *cmd_infos;
struct hifc_hwdev *hwdev;
};
struct hifc_cmdqs {
struct hifc_hwdev *hwdev;
struct pci_pool *cmd_buf_pool;
struct hifc_wq *saved_wqs;
struct hifc_cmdq_pages cmdq_pages;
struct hifc_cmdq cmdq[HIFC_MAX_CMDQ_TYPES];
u32 status;
u32 disable_flag;
};
void hifc_cmdq_ceq_handler(void *hwdev, u32 ceqe_data);
int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev);
bool hifc_cmdq_idle(struct hifc_cmdq *cmdq);
int hifc_cmdqs_init(struct hifc_hwdev *hwdev);
void hifc_cmdqs_free(struct hifc_hwdev *hwdev);
void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev,
struct hifc_cmdq *cmdq);
#endif

hifc/hifc_cqm_main.c (new file, 694 lines)

@@ -0,0 +1,694 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_cfg.h"
#include "hifc_cqm_object.h"
#include "hifc_cqm_main.h"
#define GET_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define GET_MIN(a, b) (((a) < (b)) ? (a) : (b))
static void cqm_capability_init_check_ppf(void *ex_handle,
u32 *total_function_num)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap;
struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
(handle->cqm_hdl);
if (cqm_handle->func_attribute.func_type == CQM_PPF) {
*total_function_num = service_capability->host_total_function;
cqm_handle->func_capability.timer_enable =
service_capability->timer_en;
cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n",
*total_function_num);
cqm_info(handle->dev_hdl, "Cap init: timer_enable %d (1: enable; 0: disable)\n",
cqm_handle->func_capability.timer_enable);
}
}
void cqm_test_mode_init(struct cqm_handle_s *cqm_handle,
struct service_cap *service_capability)
{
cqm_handle->func_capability.xid_alloc_mode =
service_capability->test_xid_alloc_mode;
cqm_handle->func_capability.gpa_check_enable =
service_capability->test_gpa_check_enable;
}
static s32 cqm_service_capability_init_for_each(
struct cqm_handle_s *cqm_handle,
struct service_cap *service_capability)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)cqm_handle->ex_handle;
cqm_info(handle->dev_hdl, "Cap init: fc is valid\n");
cqm_handle->func_capability.hash_number +=
service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num;
cqm_handle->func_capability.hash_basic_size = CQM_HASH_BUCKET_SIZE_64;
cqm_handle->func_capability.qpc_number +=
service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num;
cqm_handle->func_capability.qpc_basic_size =
GET_MAX(service_capability->fc_cap.parent_qpc_size,
cqm_handle->func_capability.qpc_basic_size);
cqm_handle->func_capability.qpc_alloc_static = true;
cqm_handle->func_capability.scqc_number +=
service_capability->fc_cap.dev_fc_cap.scq_num;
cqm_handle->func_capability.scqc_basic_size =
GET_MAX(service_capability->fc_cap.scqc_size,
cqm_handle->func_capability.scqc_basic_size);
cqm_handle->func_capability.srqc_number +=
service_capability->fc_cap.dev_fc_cap.srq_num;
cqm_handle->func_capability.srqc_basic_size =
GET_MAX(service_capability->fc_cap.srqc_size,
cqm_handle->func_capability.srqc_basic_size);
cqm_handle->func_capability.lun_number = CQM_LUN_FC_NUM;
cqm_handle->func_capability.lun_basic_size = CQM_LUN_SIZE_8;
cqm_handle->func_capability.taskmap_number = CQM_TASKMAP_FC_NUM;
cqm_handle->func_capability.taskmap_basic_size = PAGE_SIZE;
cqm_handle->func_capability.childc_number +=
service_capability->fc_cap.dev_fc_cap.max_child_qpc_num;
cqm_handle->func_capability.childc_basic_size =
GET_MAX(service_capability->fc_cap.child_qpc_size,
cqm_handle->func_capability.childc_basic_size);
cqm_handle->func_capability.pagesize_reorder = CQM_FC_PAGESIZE_ORDER;
return CQM_SUCCESS;
}
s32 cqm_service_capability_init(struct cqm_handle_s *cqm_handle,
struct service_cap *service_capability)
{
cqm_handle->service.has_register = false;
cqm_handle->service.buf_order = 0;
if (cqm_service_capability_init_for_each(
cqm_handle,
service_capability) == CQM_FAIL)
return CQM_FAIL;
return CQM_SUCCESS;
}
/**
* cqm_capability_init - Initialize the capability of the cqm function and
* service; reads the information from the configuration management module
* @ex_handle: handle of hwdev
*/
s32 cqm_capability_init(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap;
struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
(handle->cqm_hdl);
u32 total_function_num = 0;
int err = 0;
cqm_capability_init_check_ppf(ex_handle, &total_function_num);
cqm_handle->func_capability.flow_table_based_conn_number =
service_capability->max_connect_num;
cqm_handle->func_capability.flow_table_based_conn_cache_number =
service_capability->max_stick2cache_num;
cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n",
cqm_handle->func_capability.flow_table_based_conn_number,
cqm_handle->func_capability.flow_table_based_conn_cache_number);
cqm_handle->func_capability.qpc_reserved = 0;
cqm_handle->func_capability.mpt_reserved = 0;
cqm_handle->func_capability.qpc_alloc_static = false;
cqm_handle->func_capability.scqc_alloc_static = false;
cqm_handle->func_capability.l3i_number = CQM_L3I_COMM_NUM;
cqm_handle->func_capability.l3i_basic_size = CQM_L3I_SIZE_8;
cqm_handle->func_capability.timer_number = CQM_TIMER_ALIGN_SCALE_NUM *
total_function_num;
cqm_handle->func_capability.timer_basic_size = CQM_TIMER_SIZE_32;
if (cqm_service_capability_init(cqm_handle, service_capability) ==
CQM_FAIL) {
cqm_err(handle->dev_hdl,
CQM_FUNCTION_FAIL(cqm_service_capability_init));
err = CQM_FAIL;
goto out;
}
cqm_test_mode_init(cqm_handle, service_capability);
cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %d\n",
cqm_handle->func_capability.pagesize_reorder);
cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n",
cqm_handle->func_capability.xid_alloc_mode,
cqm_handle->func_capability.gpa_check_enable);
cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n",
cqm_handle->func_capability.qpc_alloc_static,
cqm_handle->func_capability.scqc_alloc_static);
cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n",
cqm_handle->func_capability.hash_number);
cqm_info(handle->dev_hdl, "Cap init: qpc_number 0x%x, qpc_reserved 0x%x\n",
cqm_handle->func_capability.qpc_number,
cqm_handle->func_capability.qpc_reserved);
cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x\n",
cqm_handle->func_capability.scqc_number,
cqm_handle->func_capability.scq_reserved);
cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x\n",
cqm_handle->func_capability.srqc_number);
cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n",
cqm_handle->func_capability.mpt_number,
cqm_handle->func_capability.mpt_reserved);
cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n",
cqm_handle->func_capability.gid_number,
cqm_handle->func_capability.lun_number);
cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n",
cqm_handle->func_capability.taskmap_number,
cqm_handle->func_capability.l3i_number);
cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x\n",
cqm_handle->func_capability.timer_number);
cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n",
cqm_handle->func_capability.xid2cid_number,
cqm_handle->func_capability.reorder_number);
return CQM_SUCCESS;
out:
if (cqm_handle->func_attribute.func_type == CQM_PPF)
cqm_handle->func_capability.timer_enable = 0;
return err;
}
/**
* cqm_init - Initialize cqm
* @ex_handle: handle of hwdev
*/
s32 cqm_init(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
s32 ret = CQM_FAIL;
CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
cqm_handle = kzalloc(sizeof(struct cqm_handle_s), GFP_KERNEL);
CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
CQM_ALLOC_FAIL(cqm_handle));
cqm_handle->ex_handle = handle;
cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl);
handle->cqm_hdl = (void *)cqm_handle;
/* Clear statistics */
memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct hifc_cqm_stats));
/* Read information of vf or pf */
cqm_handle->func_attribute = handle->hwif->attr;
cqm_info(handle->dev_hdl, "Func init: function type %d\n",
cqm_handle->func_attribute.func_type);
/* Read capability from the configuration management module */
ret = cqm_capability_init(ex_handle);
if (ret == CQM_FAIL) {
cqm_err(handle->dev_hdl,
CQM_FUNCTION_FAIL(cqm_capability_init));
goto err1;
}
/* Initialize entries of memory table such as BAT/CLA/bitmap */
if (cqm_mem_init(ex_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init));
goto err1;
}
/* Initialize event callback */
if (cqm_event_init(ex_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init));
goto err2;
}
/* Initialize doorbell */
if (cqm_db_init(ex_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init));
goto err3;
}
/* The timer bitmap is set by CQM from the start and is no longer
 * set/cleared through ifconfig up/down
 */
if (hifc_func_tmr_bitmap_set(ex_handle, 1) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n");
goto err5;
}
return CQM_SUCCESS;
err5:
cqm_db_uninit(ex_handle);
err3:
cqm_event_uninit(ex_handle);
err2:
cqm_mem_uninit(ex_handle);
err1:
handle->cqm_hdl = NULL;
kfree(cqm_handle);
return CQM_FAIL;
}
/**
* cqm_uninit - Deinitialize the cqm; called once when a function is removed
* @ex_handle: handle of hwdev
*/
void cqm_uninit(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
s32 ret = CQM_FAIL;
CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return);
/* The timer bitmap is set by CQM from the start and is no longer
 * set/cleared through ifconfig up/down
 */
cqm_info(handle->dev_hdl, "Timer stop: disable timer\n");
if (hifc_func_tmr_bitmap_set(ex_handle, 0) != CQM_SUCCESS)
cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n");
/* After the timer is stopped, wait one to two milliseconds
 * before releasing the resources
 */
if ((cqm_handle->func_attribute.func_type == CQM_PPF) &&
(cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) {
cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop\n");
ret = hifc_ppf_tmr_stop(handle);
if (ret != CQM_SUCCESS) {
cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop, ret=%d\n",
ret);
/* A failure to stop the timer does not
 * affect resource release
 */
}
usleep_range(900, 1000);
}
/* Release hardware doorbell */
cqm_db_uninit(ex_handle);
/* Cancel the callback of chipif */
cqm_event_uninit(ex_handle);
/* Release all table items
* and require the service to release all objects
*/
cqm_mem_uninit(ex_handle);
/* Release cqm_handle */
handle->cqm_hdl = NULL;
kfree(cqm_handle);
}
/**
* cqm_mem_init - Initialize related memory of cqm,
* including all levels of entries
* @ex_handle: handle of hwdev
*/
s32 cqm_mem_init(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init));
return CQM_FAIL;
}
if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init));
goto err1;
}
if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init));
goto err2;
}
if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl,
CQM_FUNCTION_FAIL(cqm_object_table_init));
goto err3;
}
return CQM_SUCCESS;
err3:
cqm_bitmap_uninit(cqm_handle);
err2:
cqm_cla_uninit(cqm_handle);
err1:
cqm_bat_uninit(cqm_handle);
return CQM_FAIL;
}
/**
* cqm_mem_uninit - Deinitialize related memory of cqm,
* including all levels of entries
* @ex_handle: handle of hwdev
*/
void cqm_mem_uninit(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
cqm_object_table_uninit(cqm_handle);
cqm_bitmap_uninit(cqm_handle);
cqm_cla_uninit(cqm_handle);
cqm_bat_uninit(cqm_handle);
}
/**
* cqm_event_init - Initialize the event callback of cqm
* @ex_handle: handle of hwdev
*/
s32 cqm_event_init(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
/* Register the aeq callback with chipif */
if (hifc_aeq_register_swe_cb(ex_handle,
HIFC_STATEFULL_EVENT,
cqm_aeq_callback) != CHIPIF_SUCCESS) {
cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n");
return CQM_FAIL;
}
return CQM_SUCCESS;
}
/**
* cqm_event_uninit - Deinitialize the event callback of cqm
* @ex_handle: handle of hwdev
*/
void cqm_event_uninit(void *ex_handle)
{
(void)hifc_aeq_unregister_swe_cb(ex_handle, HIFC_STATEFULL_EVENT);
}
/**
* cqm_db_addr_alloc - Apply for one page of hardware doorbell and dwqe space
* sharing the same index; both returned addresses are physical, and each
* function has up to 1K doorbell pages
* @ex_handle: handle of hwdev
* @db_addr: the address of doorbell
* @dwqe_addr: the address of dwqe
*/
s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr,
void __iomem **dwqe_addr)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
CQM_PTR_CHECK_RET(db_addr, return CQM_FAIL, CQM_PTR_NULL(db_addr));
CQM_PTR_CHECK_RET(dwqe_addr, return CQM_FAIL, CQM_PTR_NULL(dwqe_addr));
atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt);
return hifc_alloc_db_addr(ex_handle, db_addr, dwqe_addr);
}
/**
* cqm_db_init - Initialize doorbell of cqm
* @ex_handle: handle of hwdev
*/
s32 cqm_db_init(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
struct cqm_service_s *service = NULL;
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
/* Assign hardware doorbell for service */
service = &cqm_handle->service;
if (cqm_db_addr_alloc(ex_handle,
&service->hardware_db_vaddr,
&service->dwqe_vaddr) != CQM_SUCCESS) {
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_addr_alloc));
return CQM_FAIL;
}
return CQM_SUCCESS;
}
/**
* cqm_db_addr_free - Release a page of hardware doorbell and dwqe
* @ex_handle: handle of hwdev
* @db_addr: the address of doorbell
* @dwqe_addr: the address of dwqe
*/
void cqm_db_addr_free(void *ex_handle, void __iomem *db_addr,
void __iomem *dwqe_addr)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt);
hifc_free_db_addr(ex_handle, db_addr, dwqe_addr);
}
/**
* cqm_db_uninit - Deinitialize doorbell of cqm
* @ex_handle: handle of hwdev
*/
void cqm_db_uninit(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
struct cqm_service_s *service = NULL;
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
/* Release hardware doorbell */
service = &cqm_handle->service;
cqm_db_addr_free(ex_handle, service->hardware_db_vaddr,
service->dwqe_vaddr);
}
/**
* cqm_aeq_callback - aeq callback handler of the cqm module
* @ex_handle: handle of hwdev
* @event: the input type of event
* @data: the input data
*/
u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data)
{
#define CQM_AEQ_BASE_T_FC 48
#define CQM_AEQ_BASE_T_FCOE 56
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
struct cqm_service_s *service = NULL;
struct service_register_template_s *service_template = NULL;
u8 event_level = FAULT_LEVEL_MAX;
CQM_PTR_CHECK_RET(ex_handle, return event_level,
CQM_PTR_NULL(ex_handle));
atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]);
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
CQM_PTR_CHECK_RET(cqm_handle, return event_level,
CQM_PTR_NULL(cqm_handle));
if (event >= (u8)CQM_AEQ_BASE_T_FC &&
(event < (u8)CQM_AEQ_BASE_T_FCOE)) {
service = &cqm_handle->service;
service_template = &service->service_template;
if (!service_template->aeq_callback) {
cqm_err(handle->dev_hdl, "Event: service aeq_callback unregistered\n");
} else {
service_template->aeq_callback(
service_template->service_handle, event, data);
}
return event_level;
}
cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event));
return CQM_FAIL;
}
/**
* cqm_service_register - Service driver registers callback template with cqm
* @ex_handle: handle of hwdev
* @service_template: the template of service registration
*/
s32 cqm_service_register(void *ex_handle,
struct service_register_template_s *service_template)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
struct cqm_service_s *service = NULL;
CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
CQM_PTR_NULL(cqm_handle));
CQM_PTR_CHECK_RET(service_template, return CQM_FAIL,
CQM_PTR_NULL(service_template));
service = &cqm_handle->service;
if (service->has_register) {
cqm_err(handle->dev_hdl, "Service register: service has registered\n");
return CQM_FAIL;
}
service->has_register = true;
(void)memcpy((void *)(&service->service_template),
(void *)service_template,
sizeof(struct service_register_template_s));
return CQM_SUCCESS;
}
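/* Illustrative usage sketch (an assumption, not part of this driver): a
 * service driver fills in a template once and registers it; a second
 * registration on the same function fails. fc_service and fc_aeq_event
 * below are hypothetical names.
 *
 *	static void fc_aeq_event(void *service_handle, u8 event_type, u64 val)
 *	{
 *		// dispatch the asynchronous event to the service
 *	}
 *
 *	struct service_register_template_s tmpl = { 0 };
 *
 *	tmpl.srq_ctx_size = 64;
 *	tmpl.scq_ctx_size = 32;
 *	tmpl.service_handle = fc_service;
 *	tmpl.aeq_callback = fc_aeq_event;
 *	if (cqm_service_register(hwdev, &tmpl) != CQM_SUCCESS)
 *		// handle the failure
 */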
/**
* cqm_service_unregister - Unregister the service driver from cqm
* @ex_handle: handle of hwdev
*/
void cqm_service_unregister(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
struct cqm_handle_s *cqm_handle = NULL;
struct cqm_service_s *service = NULL;
CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return);
service = &cqm_handle->service;
service->has_register = false;
memset(&service->service_template, 0,
sizeof(struct service_register_template_s));
}
/**
* cqm_cmd_alloc - Allocate a cmd buffer with a fixed size of 2K; the buffer
* content is not cleared, so the service must clear it if needed
* @ex_handle: handle of hwdev
*/
struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt);
return (struct cqm_cmd_buf_s *)hifc_alloc_cmd_buf(ex_handle);
}
/**
* cqm_cmd_free - Free a cmd buffer
* @ex_handle: handle of hwdev
* @cmd_buf: the cmd buffer to free
*/
void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return);
CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf), return);
CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf), return);
atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt);
hifc_free_cmd_buf(ex_handle, (struct hifc_cmd_buf *)cmd_buf);
}
/**
* cqm_send_cmd_box - Send a cmd in box mode; the interface waits for the
* completion and therefore may sleep
* @ex_handle: handle of hwdev
* @ack_type: the type of ack
* @mod: the mode of cqm send
* @cmd: the input cmd
* @buf_in: the input buffer of cqm_cmd
* @buf_out: the output buffer of cqm_cmd
* @timeout: maximum time to wait for completion; the call may sleep
*/
s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd,
struct cqm_cmd_buf_s *buf_in,
struct cqm_cmd_buf_s *buf_out, u32 timeout)
{
struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle));
CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_PTR_NULL(buf_in));
CQM_PTR_CHECK_RET(buf_in->buf, return CQM_FAIL, CQM_PTR_NULL(buf));
atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt);
return hifc_cmdq_detail_resp(ex_handle, ack_type, mod, cmd,
(struct hifc_cmd_buf *)buf_in,
(struct hifc_cmd_buf *)buf_out, timeout);
}
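/* Illustrative usage sketch (an assumption, not part of this driver): a
 * typical box-mode command round trip. The buffer is not cleared on
 * allocation, so the caller clears it first; mod and cmd are hypothetical.
 *
 *	struct cqm_cmd_buf_s *buf_in = cqm_cmd_alloc(hwdev);
 *	struct cqm_cmd_buf_s *buf_out = cqm_cmd_alloc(hwdev);
 *
 *	if (!buf_in || !buf_out)
 *		// free whichever succeeded and bail out
 *	memset(buf_in->buf, 0, buf_in->size);
 *	// fill buf_in->buf with the command payload and set buf_in->size
 *	if (cqm_send_cmd_box(hwdev, CQM_CMD_ACK_TYPE_CMDQ, mod, cmd,
 *			     buf_in, buf_out, CQM_CMD_TIMEOUT) != CQM_SUCCESS)
 *		// handle the failure
 *	cqm_cmd_free(hwdev, buf_out);
 *	cqm_cmd_free(hwdev, buf_in);
 */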
/**
* cqm_ring_hardware_db - Ring the hardware doorbell
* @ex_handle: handle of hwdev
* @service_type: each kernel-mode service is allocated a page of hardware
* doorbell
* @db_count: producer index bits that exceed the 64-bit doorbell, carried
* in doorbell[7:0]
* @db: doorbell content organized by the service; any little-endian
* conversion must be completed by the service
*/
s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db)
{
struct hifc_hwdev *handle;
struct cqm_handle_s *cqm_handle;
struct cqm_service_s *service;
handle = (struct hifc_hwdev *)ex_handle;
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
service = &cqm_handle->service;
/* Write all before the doorbell */
wmb();
*((u64 *)service->hardware_db_vaddr + db_count) = db;
return CQM_SUCCESS;
}
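/* Illustrative usage sketch (an assumption, not part of this driver): after
 * writing new WQEs, the service publishes the producer index by ringing the
 * doorbell. build_fc_db() and pi are hypothetical; the 64-bit layout of db
 * is owned entirely by the service.
 *
 *	u64 db = build_fc_db(sq, pi);
 *
 *	cqm_ring_hardware_db(hwdev, 0, (u8)(pi & 0xff), db);
 */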

366
hifc/hifc_cqm_main.h Normal file
View File

@ -0,0 +1,366 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __CQM_MAIN_H__
#define __CQM_MAIN_H__
#define CHIPIF_SUCCESS 0
#define CQM_TIMER_ENABLE 1
enum cqm_object_type_e {
CQM_OBJECT_ROOT_CTX = 0,
CQM_OBJECT_SERVICE_CTX,
CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10,
CQM_OBJECT_NONRDMA_EMBEDDED_SQ,
CQM_OBJECT_NONRDMA_SRQ,
CQM_OBJECT_NONRDMA_EMBEDDED_CQ,
CQM_OBJECT_NONRDMA_SCQ,
};
struct service_register_template_s {
u32 service_type;
u32 srq_ctx_size; /* srq,scq context_size config */
u32 scq_ctx_size;
void *service_handle; /* ceq/aeq callback fun */
void (*aeq_callback)(void *service_handle, u8 event_type, u64 val);
};
struct cqm_service_s {
bool has_register;
void __iomem *hardware_db_vaddr;
void __iomem *dwqe_vaddr;
u32 buf_order; /* per-buf size is 2^buf_order pages */
struct service_register_template_s service_template;
};
struct cqm_func_capability_s {
bool qpc_alloc_static; /* Allocate qpc memory dynamically or statically */
bool scqc_alloc_static;
u8 timer_enable; /* whether timer enable */
u32 flow_table_based_conn_number;
u32 flow_table_based_conn_cache_number; /* Maximum number in cache */
u32 bloomfilter_length; /* Bloomfilter table size, aligned by 64B */
/* The starting position of the bloomfilter table in the cache */
u32 bloomfilter_addr;
u32 qpc_reserved; /* Reserved bits in bitmap */
u32 mpt_reserved; /* There are also reserved bits in ROCE/IWARP mpt */
/* All basic_size must be 2^n aligned */
u32 hash_number;
/* Number of hash buckets, BAT table fill size is
* aligned with 64 buckets, at least 64
*/
u32 hash_basic_size;
/* Hash bucket size is 64B, including 5 valid
* entries and 1 nxt_entry
*/
u32 qpc_number;
u32 qpc_basic_size;
/* Note: for cqm special test */
u32 pagesize_reorder;
bool xid_alloc_mode;
bool gpa_check_enable;
u32 scq_reserved;
u32 mpt_number;
u32 mpt_basic_size;
u32 scqc_number;
u32 scqc_basic_size;
u32 srqc_number;
u32 srqc_basic_size;
u32 gid_number;
u32 gid_basic_size;
u32 lun_number;
u32 lun_basic_size;
u32 taskmap_number;
u32 taskmap_basic_size;
u32 l3i_number;
u32 l3i_basic_size;
u32 childc_number;
u32 childc_basic_size;
u32 child_qpc_id_start; /* Child ctx of FC is global addressing */
/* The maximum number of child ctx in
* chip is 8096
*/
u32 childc_number_all_function;
u32 timer_number;
u32 timer_basic_size;
u32 xid2cid_number;
u32 xid2cid_basic_size;
u32 reorder_number;
u32 reorder_basic_size;
};
#define CQM_PF TYPE_PF
#define CQM_PPF TYPE_PPF
#define CQM_BAT_ENTRY_MAX (16)
#define CQM_BAT_ENTRY_SIZE (16)
struct cqm_buf_list_s {
void *va;
dma_addr_t pa;
u32 refcount;
};
struct cqm_buf_s {
struct cqm_buf_list_s *buf_list;
struct cqm_buf_list_s direct;
u32 page_number; /* page_number=2^n buf_number */
u32 buf_number; /* buf_list node count */
u32 buf_size; /* buf_size=2^n PAGE_SIZE */
};
struct cqm_bitmap_s {
ulong *table;
u32 max_num;
u32 last;
/* The index that cannot be allocated is reserved in the front */
u32 reserved_top;
/* Lock for bitmap allocation */
spinlock_t lock;
};
struct completion;
struct cqm_object_s {
u32 service_type;
u32 object_type; /* context,queue,mpt,mtt etc */
u32 object_size; /* size in bytes, for queue/ctx/MPT */
atomic_t refcount;
struct completion free;
void *cqm_handle;
};
struct cqm_object_table_s {
struct cqm_object_s **table;
u32 max_num;
rwlock_t lock;
};
struct cqm_cla_table_s {
u32 type;
u32 max_buffer_size;
u32 obj_num;
bool alloc_static; /* Whether the buffer is statically allocated */
u32 cla_lvl;
/* The value of x calculated by the cacheline, used for chip */
u32 cacheline_x;
/* The value of y calculated by the cacheline, used for chip */
u32 cacheline_y;
/* The value of z calculated by the cacheline, used for chip */
u32 cacheline_z;
/* The value of x calculated by the obj_size, used for software */
u32 x;
/* The value of y calculated by the obj_size, used for software */
u32 y;
/* The value of z calculated by the obj_size, used for software */
u32 z;
struct cqm_buf_s cla_x_buf;
struct cqm_buf_s cla_y_buf;
struct cqm_buf_s cla_z_buf;
u32 trunk_order;/* A continuous physical page contains 2^order pages */
u32 obj_size;
/* Lock for cla buffer allocation and free */
struct mutex lock;
struct cqm_bitmap_s bitmap;
/* The association mapping table of index and object */
struct cqm_object_table_s obj_table;
};
typedef void (*init_handler)(void *cqm_handle,
struct cqm_cla_table_s *cla_table,
void *cap);
struct cqm_cla_entry_init_s {
u32 type;
init_handler cqm_cla_init_handler;
};
struct cqm_bat_table_s {
u32 bat_entry_type[CQM_BAT_ENTRY_MAX];
u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE];
struct cqm_cla_table_s entry[CQM_BAT_ENTRY_MAX];
u32 bat_size;
};
struct cqm_handle_s {
struct hifc_hwdev *ex_handle;
struct pci_dev *dev;
struct hifc_func_attr func_attribute; /* vf or pf */
struct cqm_func_capability_s func_capability;
struct cqm_service_s service;
struct cqm_bat_table_s bat_table;
struct list_head node;
};
struct cqm_cmd_buf_s {
void *buf;
dma_addr_t dma;
u16 size;
};
struct cqm_queue_header_s {
u64 doorbell_record;
u64 ci_record;
u64 rsv1; /* the shared area between driver and ucode */
u64 rsv2; /* the shared area between driver and ucode */
};
struct cqm_queue_s {
struct cqm_object_s object;
u32 index; /* embedded queue QP has no index; SRQ and SCQ have one */
void *priv; /* service driver private info */
u32 current_q_doorbell;
u32 current_q_room;
/* nonrdma: only select q_room_buf_1 for q_room_buf */
struct cqm_buf_s q_room_buf_1;
struct cqm_buf_s q_room_buf_2;
struct cqm_queue_header_s *q_header_vaddr;
dma_addr_t q_header_paddr;
u8 *q_ctx_vaddr; /* SRQ and SCQ ctx space */
dma_addr_t q_ctx_paddr;
u32 valid_wqe_num;
/* added for srq */
u8 *tail_container;
u8 *head_container;
u8 queue_link_mode; /* link or ring mode */
};
struct cqm_nonrdma_qinfo_s {
struct cqm_queue_s common;
u32 wqe_size;
/* The number of wqe contained in each buf (excluding link wqe),
* For srq, it is the number of wqe contained in 1 container
*/
u32 wqe_per_buf;
u32 q_ctx_size;
/* When different services use different sizes of ctx, a large ctx will
* occupy multiple consecutive indexes of the bitmap
*/
u32 index_count;
u32 container_size;
};
/* service context, QPC, mpt */
struct cqm_qpc_mpt_s {
struct cqm_object_s object;
u32 xid;
dma_addr_t paddr;
void *priv; /* service driver private info */
u8 *vaddr;
};
struct cqm_qpc_mpt_info_s {
struct cqm_qpc_mpt_s common;
/* When different services use different sizes of QPC, large QPC/mpt
* will occupy multiple consecutive indexes of the bitmap
*/
u32 index_count;
};
#define CQM_ADDR_COMBINE(high_addr, low_addr) \
((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr)))
#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32))
#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff))
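/* For example, CQM_ADDR_HI(0x1234567890ULL) is 0x12,
 * CQM_ADDR_LW(0x1234567890ULL) is 0x34567890, and
 * CQM_ADDR_COMBINE(0x12, 0x34567890) restores 0x1234567890.
 */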
#define CQM_HASH_BUCKET_SIZE_64 (64)
#define CQM_LUN_SIZE_8 (8)
#define CQM_L3I_SIZE_8 (8)
#define CQM_TIMER_SIZE_32 (32)
#define CQM_LUN_FC_NUM (64)
#define CQM_TASKMAP_FC_NUM (4)
#define CQM_L3I_COMM_NUM (64)
#define CQM_TIMER_SCALE_NUM (2*1024)
#define CQM_TIMER_ALIGN_WHEEL_NUM (8)
#define CQM_TIMER_ALIGN_SCALE_NUM \
(CQM_TIMER_SCALE_NUM*CQM_TIMER_ALIGN_WHEEL_NUM)
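/* CQM_TIMER_ALIGN_SCALE_NUM = (2 * 1024) * 8 = 16384, so each function
 * contributes 16K timer entries (see timer_number in cqm_capability_init).
 */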
#define CQM_FC_PAGESIZE_ORDER (0)
#define CQM_QHEAD_ALIGN_ORDER (6)
s32 cqm_mem_init(void *ex_handle);
void cqm_mem_uninit(void *ex_handle);
s32 cqm_event_init(void *ex_handle);
void cqm_event_uninit(void *ex_handle);
s32 cqm_db_init(void *ex_handle);
void cqm_db_uninit(void *ex_handle);
s32 cqm_init(void *ex_handle);
void cqm_uninit(void *ex_handle);
s32 cqm_service_register(void *ex_handle,
struct service_register_template_s *service_template);
void cqm_service_unregister(void *ex_handle);
s32 cqm_ring_hardware_db(void *ex_handle,
u32 service_type,
u8 db_count, u64 db);
s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd,
struct cqm_cmd_buf_s *buf_in,
struct cqm_cmd_buf_s *buf_out,
u32 timeout);
u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data);
void cqm_object_delete(struct cqm_object_s *object);
struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle);
void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf);
struct cqm_queue_s *cqm_object_fc_srq_create(
void *ex_handle,
enum cqm_object_type_e object_type,
u32 wqe_number,
u32 wqe_size,
void *object_priv);
struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create(
void *ex_handle,
enum cqm_object_type_e object_type,
u32 object_size,
void *object_priv,
u32 index);
struct cqm_queue_s *cqm_object_nonrdma_queue_create(
void *ex_handle,
enum cqm_object_type_e object_type,
u32 wqe_number,
u32 wqe_size,
void *object_priv);
#define CQM_PTR_NULL(x) "%s: "#x" is null\n", __func__
#define CQM_ALLOC_FAIL(x) "%s: "#x" alloc fail\n", __func__
#define CQM_MAP_FAIL(x) "%s: "#x" map fail\n", __func__
#define CQM_FUNCTION_FAIL(x) "%s: "#x" return failure\n", __func__
#define CQM_WRONG_VALUE(x) "%s: "#x" %u is wrong\n", __func__, (u32)x
#define cqm_err(dev, format, ...) \
dev_err(dev, "[CQM]"format, ##__VA_ARGS__)
#define cqm_warn(dev, format, ...) \
dev_warn(dev, "[CQM]"format, ##__VA_ARGS__)
#define cqm_notice(dev, format, ...) \
dev_notice(dev, "[CQM]"format, ##__VA_ARGS__)
#define cqm_info(dev, format, ...) \
dev_info(dev, "[CQM]"format, ##__VA_ARGS__)
#define cqm_dbg(format, ...)
#define CQM_PTR_CHECK_RET(ptr, ret, desc) \
do {\
if (unlikely(NULL == (ptr))) {\
pr_err("[CQM]"desc);\
ret; \
} \
} while (0)
#define CQM_PTR_CHECK_NO_RET(ptr, desc, ret) \
do {\
if (unlikely((ptr) == NULL)) {\
pr_err("[CQM]"desc);\
ret; \
} \
} while (0)
#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \
do {\
if (unlikely((expect) != (actual))) {\
cqm_err(dev_hdl, desc);\
ret; \
} \
} while (0)
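/* The check macros take the recovery action as an argument; for example,
 *	CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL,
 *			  CQM_PTR_NULL(cqm_handle));
 * logs "[CQM]<caller>: cqm_handle is null" and executes "return CQM_FAIL"
 * in the calling function when the pointer is NULL.
 */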
#endif /* __CQM_MAIN_H__ */

3599
hifc/hifc_cqm_object.c Normal file

File diff suppressed because it is too large Load Diff

244
hifc/hifc_cqm_object.h Normal file
View File

@ -0,0 +1,244 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __CQM_OBJECT_H__
#define __CQM_OBJECT_H__
#define CLA_TABLE_PAGE_ORDER (0)
#define CQM_4K_PAGE_ORDER (0)
#define CQM_CQ_DEPTH_MAX (32768)
#define CQM_CQ_DEPTH_MIN (256)
#define CQM_BAT_SIZE_FT_PF (192)
#define CQM_WQE_WF_LINK 1
#define CQM_WQE_WF_NORMAL 0
#define CQM_QUEUE_LINK_MODE 0
#define CQM_QUEUE_RING_MODE 1
#define CQM_4K_PAGE_SIZE 4096
#define CQM_SUCCESS 0
#define CQM_FAIL -1
#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2
#define CQM_CMD_TIMEOUT 10000 /*ms*/
#define CQM_INDEX_INVALID ~(0U)
#define CQM_INDEX_RESERVED (0xfffff) /* reserved by cqm alloc */
enum cqm_bat_entry_type_e {
CQM_BAT_ENTRY_T_CFG = 0,
CQM_BAT_ENTRY_T_HASH,
CQM_BAT_ENTRY_T_QPC,
CQM_BAT_ENTRY_T_SCQC,
CQM_BAT_ENTRY_T_SRQC,
CQM_BAT_ENTRY_T_MPT,
CQM_BAT_ENTRY_T_GID,
CQM_BAT_ENTRY_T_LUN,
CQM_BAT_ENTRY_T_TASKMAP,
CQM_BAT_ENTRY_T_L3I,
CQM_BAT_ENTRY_T_CHILDC,
CQM_BAT_ENTRY_T_TIMER,
CQM_BAT_ENTRY_T_XID2CID,
CQM_BAT_ENTRY_T_REORDER,
CQM_BAT_ENTRY_T_INVALID = 0xff,
};
enum cqm_cmd_type_e {
CQM_CMD_T_INVALID = 0,
CQM_CMD_T_BAT_UPDATE,
CQM_CMD_T_CLA_UPDATE,
CQM_CMD_T_BLOOMFILTER_SET,
CQM_CMD_T_BLOOMFILTER_CLEAR,
CQM_CMD_T_COMPACT_SRQ_UPDATE,
CQM_CMD_T_CLA_CACHE_INVALID,
CQM_CMD_T_BLOOMFILTER_INIT,
QM_CMD_T_MAX
};
/*linkwqe*/
#define CQM_LINK_WQE_CTRLSL_VALUE 2
#define CQM_LINK_WQE_LP_VALID 1
#define CQM_LINK_WQE_LP_INVALID 0
#define CQM_LINK_WQE_OWNER_VALID 1
#define CQM_LINK_WQE_OWNER_INVALID 0
/*CLA update mode*/
#define CQM_CLA_RECORD_NEW_GPA 0
#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1
#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2
#define CQM_CLA_LVL_0 0
#define CQM_CLA_LVL_1 1
#define CQM_CLA_LVL_2 2
#define CQM_MAX_INDEX_BIT 19
#define CQM_CHIP_CACHELINE 256
enum cqm_cmd_ack_type_e {
CQM_CMD_ACK_TYPE_CMDQ = 0, /* ack: write back to cmdq */
CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* ack report scq by root ctx ctx */
CQM_CMD_ACK_TYPE_APP_CQN = 2 /* ack report scq by parent ctx */
};
struct cqm_bat_entry_cfg_s {
u32 cur_conn_num_h_4 :4;
u32 rsv1 :4;
u32 max_conn_num :20;
u32 rsv2 :4;
u32 max_conn_cache :10;
u32 rsv3 :6;
u32 cur_conn_num_l_16 :16;
u32 bloom_filter_addr :16;
u32 cur_conn_cache :10;
u32 rsv4 :6;
u32 bucket_num :16;
u32 bloom_filter_len :16;
};
#define CQM_BAT_NO_BYPASS_CACHE 0
#define CQM_BAT_ENTRY_SIZE_256 0
#define CQM_BAT_ENTRY_SIZE_512 1
#define CQM_BAT_ENTRY_SIZE_1024 2
struct cqm_bat_entry_standerd_s {
u32 entry_size :2;
u32 rsv1 :6;
u32 max_number :20;
u32 rsv2 :4;
u32 cla_gpa_h :32;
u32 cla_gpa_l :32;
u32 rsv3 :8;
u32 z :5;
u32 y :5;
u32 x :5;
u32 rsv24 :1;
u32 bypass :1;
u32 cla_level :2;
u32 rsv5 :5;
};
struct cqm_bat_entry_taskmap_s {
u32 gpa0_h;
u32 gpa0_l;
u32 gpa1_h;
u32 gpa1_l;
u32 gpa2_h;
u32 gpa2_l;
u32 gpa3_h;
u32 gpa3_l;
};
struct cqm_cla_cache_invalid_cmd_s {
u32 gpa_h;
u32 gpa_l;
u32 cache_size;/* CLA cache size=4096B */
};
struct cqm_cla_update_cmd_s {
/* need to update gpa addr */
u32 gpa_h;
u32 gpa_l;
/* update value */
u32 value_h;
u32 value_l;
};
struct cqm_bat_update_cmd_s {
#define CQM_BAT_MAX_SIZE 256
u32 offset; /* byte offset,16Byte aligned */
u32 byte_len; /* max size: 256byte */
u8 data[CQM_BAT_MAX_SIZE];
};
struct cqm_handle_s;
struct cqm_linkwqe_s {
u32 rsv1 :14;
u32 wf :1;
u32 rsv2 :14;
u32 ctrlsl :2;
u32 o :1;
u32 rsv3 :31;
u32 lp :1;
u32 next_page_gpa_h;
u32 next_page_gpa_l;
u32 next_buffer_addr_h;
u32 next_buffer_addr_l;
};
struct cqm_srq_linkwqe_s {
struct cqm_linkwqe_s linkwqe;
/* added by wss for srq */
u32 current_buffer_gpa_h;
u32 current_buffer_gpa_l;
u32 current_buffer_addr_h;
u32 current_buffer_addr_l;
u32 fast_link_page_addr_h;
u32 fast_link_page_addr_l;
u32 fixed_next_buffer_addr_h;
u32 fixed_next_buffer_addr_l;
};
union cqm_linkwqe_first_64b_s {
struct cqm_linkwqe_s basic_linkwqe;
u32 value[16];
};
struct cqm_linkwqe_second_64b_s {
u32 rsvd0[4];
u32 rsvd1[4];
union {
struct {
u32 rsvd0[3];
u32 rsvd1 :29;
u32 toe_o :1;
u32 resvd2 :2;
} bs;
u32 value[4];
} third_16B;
union {
struct {
u32 rsvd0[2];
u32 rsvd1 :31;
u32 ifoe_o :1;
u32 rsvd2;
} bs;
u32 value[4];
} forth_16B;
};
struct cqm_linkwqe_128b_s {
union cqm_linkwqe_first_64b_s first_64b;
struct cqm_linkwqe_second_64b_s second_64b;
};
s32 cqm_bat_init(struct cqm_handle_s *cqm_handle);
void cqm_bat_uninit(struct cqm_handle_s *cqm_handle);
s32 cqm_cla_init(struct cqm_handle_s *cqm_handle);
void cqm_cla_uninit(struct cqm_handle_s *cqm_handle);
s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle);
void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle);
s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle);
void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle);
#endif /* __CQM_OBJECT_H__ */

947
hifc/hifc_dbgtool_knl.c Normal file
View File

@ -0,0 +1,947 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_tool.h"
#include "hifc_dbgtool_knl.h"
struct ffm_intr_info {
u8 node_id;
/* error level of the interrupt source */
u8 err_level;
/* Classification by interrupt source properties */
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
};
#define DBGTOOL_MSG_MAX_SIZE 2048ULL
#define HIFC_SELF_CMD_UP2PF_FFM 0x26
void *g_card_node_array[MAX_CARD_NUM] = {0};
void *g_hifc_card_vir_addr[MAX_CARD_NUM] = {0};
u64 g_hifc_card_phy_addr[MAX_CARD_NUM] = {0};
/* lock for g_hifc_card_vir_addr */
struct mutex g_hifc_addr_lock;
int g_hifc_card_id;
/* dbgtool character device name, class name, dev path */
#define CHR_DEV_DBGTOOL "hifc_dbgtool_chr_dev"
#define CLASS_DBGTOOL "hifc_dbgtool_class"
#define DBGTOOL_DEV_PATH "/dev/hifc_dbgtool_chr_dev"
struct dbgtool_k_glb_info {
struct semaphore dbgtool_sem;
struct ffm_record_info *ffm;
};
static dev_t dbgtool_dev_id; /* device id */
static struct cdev dbgtool_chr_dev; /* struct of char device */
/*lint -save -e104 -e808*/
static struct class *dbgtool_d_class; /* struct of char class */
/*lint -restore*/
static int g_dbgtool_init_flag;
static int g_dbgtool_ref_cnt;
static int dbgtool_knl_open(struct inode *pnode,
struct file *pfile)
{
return 0;
}
static int dbgtool_knl_release(struct inode *pnode,
struct file *pfile)
{
return 0;
}
static ssize_t dbgtool_knl_read(struct file *pfile,
char __user *ubuf,
size_t size,
loff_t *ppos)
{
return 0;
}
static ssize_t dbgtool_knl_write(struct file *pfile,
const char __user *ubuf,
size_t size,
loff_t *ppos)
{
return 0;
}
static bool is_valid_phy_addr(u64 offset)
{
int i;
for (i = 0; i < MAX_CARD_NUM; i++) {
if (offset == g_hifc_card_phy_addr[i])
return true;
}
return false;
}
int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long vmsize = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
phys_addr_t phy_addr;
if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) {
pr_err("Map size = %lu is bigger than alloc\n", vmsize);
return -EAGAIN;
}
if (offset && !is_valid_phy_addr((u64)offset) &&
!hifc_is_valid_bar_addr((u64)offset)) {
pr_err("offset is invalid");
return -EAGAIN;
}
/* old version of tool set vma->vm_pgoff to 0 */
phy_addr = offset ? offset : g_hifc_card_phy_addr[g_hifc_card_id];
if (!phy_addr) {
pr_err("Card_id = %d physical address is 0\n", g_hifc_card_id);
return -EAGAIN;
}
if (remap_pfn_range(vma, vma->vm_start,
(phy_addr >> PAGE_SHIFT),
vmsize, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
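/* Illustrative user-space sketch (an assumption, not part of this driver):
 * the debug tool maps the per-card buffer through this handler. An offset of
 * 0 selects the buffer of the currently selected card; a non-zero offset
 * must be a known physical address.
 *
 *	int fd = open(DBGTOOL_DEV_PATH, O_RDWR);
 *	size_t len = sysconf(_SC_PAGESIZE) << DBGTOOL_PAGE_ORDER;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */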
/**
* dbgtool_knl_api_cmd_read - used for read operations
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
u8 *cmd;
u16 size;
void *ack;
u16 ack_size;
u32 pf_id;
void *hwdev;
pf_id = para->param.api_rd.pf_id;
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big\n", pf_id);
return -EFAULT;
}
/* obtaining pf_id chipif pointer */
hwdev = g_func_handle_array[pf_id];
if (!hwdev) {
pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id);
return -EFAULT;
}
/* alloc cmd and ack memory */
size = para->param.api_rd.size;
if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Read cmd size invalid or more than 2K\n");
return -EINVAL;
}
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc read cmd mem fail\n");
return -ENOMEM;
}
ack_size = para->param.api_rd.ack_size;
if (para->param.api_rd.ack_size == 0 ||
ack_size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Read cmd ack size is 0\n");
ret = -ENOMEM;
goto alloc_ack_mem_fail;
}
ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL);
if (!ack) {
pr_err("Alloc read ack mem fail\n");
ret = -ENOMEM;
goto alloc_ack_mem_fail;
}
/* cmd content copied from user-mode */
if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) {
pr_err("Copy cmd from user fail\n");
ret = -EFAULT;
goto copy_user_cmd_fail;
}
/* Invoke the api cmd interface to read the content */
ret = hifc_api_cmd_read_ack(hwdev, para->param.api_rd.dest,
cmd, size, ack, ack_size);
if (ret) {
pr_err("Api send single cmd ack fail!\n");
goto api_rd_fail;
}
/* Copy the contents of the ack to the user state */
if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) {
pr_err("Copy ack to user fail\n");
ret = -EFAULT;
}
api_rd_fail:
copy_user_cmd_fail:
kfree(ack);
alloc_ack_mem_fail:
kfree(cmd);
return ret;
}
/**
* dbgtool_knl_api_cmd_write - used for write operations
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
u8 *cmd;
u16 size;
u32 pf_id;
void *hwdev;
pf_id = para->param.api_wr.pf_id;
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big\n", pf_id);
return -EFAULT;
}
/* obtaining chipif pointer according to pf_id */
hwdev = g_func_handle_array[pf_id];
if (!hwdev) {
pr_err("PF id(0x%x) handle null\n", pf_id);
return -EFAULT;
}
/* alloc cmd memory */
size = para->param.api_wr.size;
if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Write cmd size invalid or more than 2K\n");
return -EINVAL;
}
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc write cmd mem fail\n");
return -ENOMEM;
}
/* cmd content copied from user-mode */
if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) {
pr_err("Copy cmd from user fail\n");
ret = -EFAULT;
goto copy_user_cmd_fail;
}
/* api cmd interface is invoked to write the content */
ret = hifc_api_cmd_write_nack(hwdev, para->param.api_wr.dest,
cmd, size);
if (ret)
pr_err("Api send single cmd nack fail\n");
copy_user_cmd_fail:
kfree(cmd);
return ret;
}
void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx,
void **g_func_handle_array)
{
u32 func_idx;
struct hifc_hwdev *hwdev;
if (!dev_info) {
pr_err("Params error!\n");
return;
}
/* pf at most 16 */
for (func_idx = 0; func_idx < 16; func_idx++) {
hwdev = (struct hifc_hwdev *)g_func_handle_array[func_idx];
dev_info[func_idx].phy_addr = g_hifc_card_phy_addr[card_idx];
if (!hwdev) {
dev_info[func_idx].bar0_size = 0;
dev_info[func_idx].bus = 0;
dev_info[func_idx].slot = 0;
dev_info[func_idx].func = 0;
} else {
dev_info[func_idx].bar0_size =
pci_resource_len
(((struct pci_dev *)hwdev->pcidev_hdl), 0);
dev_info[func_idx].bus =
((struct pci_dev *)
hwdev->pcidev_hdl)->bus->number;
dev_info[func_idx].slot =
PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
dev_info[func_idx].func =
PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
}
}
}
/**
* dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para,
void **g_func_handle_array)
{
struct pf_dev_info dev_info[16] = { {0} };
unsigned char *tmp;
int i;
mutex_lock(&g_hifc_addr_lock);
if (!g_hifc_card_vir_addr[g_hifc_card_id]) {
g_hifc_card_vir_addr[g_hifc_card_id] =
(void *)__get_free_pages(GFP_KERNEL,
DBGTOOL_PAGE_ORDER);
if (!g_hifc_card_vir_addr[g_hifc_card_id]) {
pr_err("Alloc dbgtool api chain fail!\n");
mutex_unlock(&g_hifc_addr_lock);
return -EFAULT;
}
memset(g_hifc_card_vir_addr[g_hifc_card_id], 0,
PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
g_hifc_card_phy_addr[g_hifc_card_id] =
virt_to_phys(g_hifc_card_vir_addr[g_hifc_card_id]);
if (!g_hifc_card_phy_addr[g_hifc_card_id]) {
pr_err("phy addr for card %d is 0\n", g_hifc_card_id);
free_pages((unsigned long)g_hifc_card_vir_addr[g_hifc_card_id],
DBGTOOL_PAGE_ORDER);
g_hifc_card_vir_addr[g_hifc_card_id] = NULL;
mutex_unlock(&g_hifc_addr_lock);
return -EFAULT;
}
tmp = g_hifc_card_vir_addr[g_hifc_card_id];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
SetPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
}
mutex_unlock(&g_hifc_addr_lock);
chipif_get_all_pf_dev_info(dev_info, g_hifc_card_id, g_func_handle_array);
/* Copy the dev_info to user mode */
if (copy_to_user(para->param.dev_info, dev_info,
(unsigned int)sizeof(dev_info))) {
pr_err("Copy dev_info to user fail\n");
return -EFAULT;
}
return 0;
}
/**
* dbgtool_knl_ffm_info_rd - Read ffm information
* @para: the dbgtool parameter
* @dbgtool_info: the dbgtool info
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para,
struct dbgtool_k_glb_info *dbgtool_info)
{
/* Copy the ffm_info to user mode */
if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm,
(unsigned int)sizeof(struct ffm_record_info))) {
pr_err("Copy ffm_info to user fail\n");
return -EFAULT;
}
return 0;
}
/**
* dbgtool_knl_ffm_info_clr - Clear FFM information
* @para: unused
* @dbgtool_info: the dbgtool info
*/
static void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para,
struct dbgtool_k_glb_info *dbgtool_info)
{
dbgtool_info->ffm->ffm_num = 0;
}
/**
* dbgtool_knl_msg_to_up - Send a message to the management CPU (uP) on
* behalf of a received dbgtool command
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
void *buf_in;
void *buf_out;
u16 out_size;
u8 pf_id;
if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("User data(%d) more than 2KB\n",
para->param.msg2up.in_size);
return -EFAULT;
}
pf_id = para->param.msg2up.pf_id;
/* pf at most 16 */
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id);
return -EFAULT;
}
if (!g_func_handle_array[pf_id]) {
pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id);
return -EFAULT;
}
/* alloc buf_in and buf_out memory, apply for 2K */
buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
if (!buf_in) {
pr_err("Alloc buf_in mem fail\n");
return -ENOMEM;
}
buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
if (!buf_out) {
pr_err("Alloc buf_out mem fail\n");
ret = -ENOMEM;
goto alloc_buf_out_mem_fail;
}
/* copy buf_in from the user state */
if (copy_from_user(buf_in, para->param.msg2up.buf_in,
(unsigned long)para->param.msg2up.in_size)) {
pr_err("Copy buf_in from user fail\n");
ret = -EFAULT;
goto copy_user_buf_in_fail;
}
out_size = DBGTOOL_MSG_MAX_SIZE;
/* Invoke the pf2up communication interface */
ret = hifc_msg_to_mgmt_sync(g_func_handle_array[pf_id],
para->param.msg2up.mod,
para->param.msg2up.cmd,
buf_in,
para->param.msg2up.in_size,
buf_out,
&out_size,
0);
if (ret)
goto msg_2_up_fail;
/* Copy the out_size and buf_out content to user mode */
if (copy_to_user(para->param.msg2up.out_size, &out_size,
(unsigned int)sizeof(out_size))) {
pr_err("Copy out_size to user fail\n");
ret = -EFAULT;
goto copy_out_size_fail;
}
if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) {
pr_err("Copy buf_out to user fail\n");
ret = -EFAULT;
}
copy_out_size_fail:
msg_2_up_fail:
copy_user_buf_in_fail:
kfree(buf_out);
alloc_buf_out_mem_fail:
kfree(buf_in);
return ret;
}
long hifc_dbgtool_knl_free_mem(int id)
{
unsigned char *tmp;
int i;
mutex_lock(&g_hifc_addr_lock);
if (!g_hifc_card_vir_addr[id]) {
mutex_unlock(&g_hifc_addr_lock);
return 0;
}
tmp = g_hifc_card_vir_addr[id];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
ClearPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
free_pages((unsigned long)g_hifc_card_vir_addr[id], DBGTOOL_PAGE_ORDER);
g_hifc_card_vir_addr[id] = NULL;
g_hifc_card_phy_addr[id] = 0;
mutex_unlock(&g_hifc_addr_lock);
return 0;
}
static int get_card_id_by_name(char *chip_name)
{
struct card_node *card_info = NULL;
int i;
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
if (!strncmp(chip_name, card_info->chip_name, IFNAMSIZ))
break;
}
if (i == MAX_CARD_NUM) {
pr_err("Can't find this card %s\n", chip_name);
return -EFAULT;
}
return i;
}
/*lint -save -e771 -e794*/
static long process_dbgtool_cmd(struct dbgtool_param *param, unsigned int cmd,
int idx)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct card_node *card_info = NULL;
unsigned int real_cmd;
long ret = 0;
g_hifc_card_id = idx;
card_info = (struct card_node *)g_card_node_array[idx];
dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
down(&dbgtool_info->dbgtool_sem);
real_cmd = _IOC_NR(cmd);
switch (real_cmd) {
case DBGTOOL_CMD_API_RD:
ret = dbgtool_knl_api_cmd_read(param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_API_WR:
ret = dbgtool_knl_api_cmd_write(param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_FFM_RD:
ret = dbgtool_knl_ffm_info_rd(param, dbgtool_info);
break;
case DBGTOOL_CMD_FFM_CLR:
dbgtool_knl_ffm_info_clr(param, dbgtool_info);
break;
case DBGTOOL_CMD_PF_DEV_INFO_GET:
ret = dbgtool_knl_pf_dev_info_get(param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_MSG_2_UP:
ret = dbgtool_knl_msg_to_up(param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_FREE_MEM:
ret = hifc_dbgtool_knl_free_mem(idx);
break;
default:
pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd);
ret = -EFAULT;
}
up(&dbgtool_info->dbgtool_sem);
return ret;
}
/**
* dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry
* @pfile: the pointer to file
* @cmd: the command type
* @arg: user space
* Return: 0 - success, negative - failure
*/
static long dbgtool_knl_unlocked_ioctl(struct file *pfile,
unsigned int cmd,
unsigned long arg)
{
struct dbgtool_param param;
int idx;
(void)memset(&param, 0, sizeof(param));
if (copy_from_user(&param, (void *)arg, sizeof(param))) {
pr_err("Copy param from user fail\n");
return -EFAULT;
}
param.chip_name[IFNAMSIZ - 1] = '\0';
idx = get_card_id_by_name(param.chip_name);
if (idx < 0)
return -EFAULT;
return process_dbgtool_cmd(&param, cmd, idx);
}
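/* Illustrative user-space sketch (an assumption, not part of this driver):
 * only the ioctl number (_IOC_NR) is decoded above, with commands taken from
 * enum dbg_tool_cmd; the 'D' ioctl magic below is hypothetical.
 *
 *	struct dbgtool_param param = { 0 };
 *
 *	strncpy(param.chip_name, "hifc0", sizeof(param.chip_name) - 1);
 *	param.param.ffm_rd = ffm_buf;
 *	ioctl(fd, _IOWR('D', DBGTOOL_CMD_FFM_RD, struct dbgtool_param),
 *	      &param);
 */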
static struct card_node *get_card_node_by_hwdev(const void *handle)
{
struct card_node *card_info = NULL;
bool flag = false;
int i, j;
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
for (j = 0; j < MAX_FUNCTION_NUM; j++) {
if (handle == card_info->func_handle_array[j]) {
flag = true;
break;
}
}
if (flag)
break;
}
if (i == MAX_CARD_NUM) {
pr_err("Id(%d) cant find this card\n", i);
return NULL;
}
return card_info;
}
/**
* ffm_intr_msg_record - Record FFM interrupt information reported by the uP
* @handle: the function handle
* @buf_in: the pointer to input buffer
* @in_size: input buffer size
* @buf_out: the pointer to output buffer
* @out_size: output buffer size
*/
static void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct ffm_intr_info *intr;
u32 ffm_idx;
struct tm tm;
struct card_node *card_info = NULL;
card_info = get_card_node_by_hwdev(handle);
if (!card_info)
return;
dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
if (!dbgtool_info) {
pr_err("Dbgtool info is null\n");
return;
}
intr = (struct ffm_intr_info *)buf_in;
if (!dbgtool_info->ffm)
return;
ffm_idx = dbgtool_info->ffm->ffm_num;
if (ffm_idx < FFM_RECORD_NUM_MAX) {
pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx);
dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id;
dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level;
dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type;
dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr =
intr->err_csr_addr;
dbgtool_info->ffm->ffm[ffm_idx].err_csr_value =
intr->err_csr_value;
/* Calculate the time in date value to tm */
time64_to_tm(ktime_to_ms(ktime_get_real()) / MSEC_PER_SEC, 0, &tm);
/* tm_year starts from 1900; 0->1900, 1->1901, and so on */
dbgtool_info->ffm->ffm[ffm_idx].year =
(u16)(tm.tm_year + 1900);
/* tm_mon starts from 0, 0 indicates January, and so on */
dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)tm.tm_mon + 1;
dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)tm.tm_mday;
dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)tm.tm_hour;
dbgtool_info->ffm->ffm[ffm_idx].min = (u8)tm.tm_min;
dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)tm.tm_sec;
dbgtool_info->ffm->ffm_num++;
}
}
/*lint -restore*/
/*lint -save -e785 -e438*/
static const struct file_operations dbgtool_file_operations = {
.owner = THIS_MODULE,
.open = dbgtool_knl_open,
.release = dbgtool_knl_release,
.read = dbgtool_knl_read,
.write = dbgtool_knl_write,
.unlocked_ioctl = dbgtool_knl_unlocked_ioctl,
.mmap = hifc_mem_mmap,
};
static int dbgtool_create_cdev(void)
{
struct device *pdevice;
int ret = 0;
/* alloc device id */
ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL);
if (ret) {
pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret);
return ret;
}
cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations);
ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1);
if (ret) {
pr_err("Add dgbtool dev fail, ret=0x%x\n", ret);
goto cdev_add_fail;
}
/*lint -save -e160*/
dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL);
/*lint -restore*/
if (IS_ERR(dbgtool_d_class)) {
pr_err("Create dgbtool class fail\n");
ret = -EFAULT;
goto cls_create_fail;
}
/* Export device information to user space
* (/sys/class/class name/device name)
*/
pdevice = device_create(dbgtool_d_class, NULL,
dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL);
if (IS_ERR(pdevice)) {
pr_err("Create dgbtool device fail\n");
ret = -EFAULT;
goto dev_create_fail;
}
return 0;
dev_create_fail:
class_destroy(dbgtool_d_class);
cls_create_fail:
cdev_del(&(dbgtool_chr_dev));
cdev_add_fail:
unregister_chrdev_region(dbgtool_dev_id, 1);
return ret;
}
/**
* hifc_dbgtool_knl_init - dbgtool character device init
* @hwdev: the pointer to hardware device
* @chip_node: the pointer to card node
* Return: 0 - success, negative - failure
*/
int hifc_dbgtool_knl_init(void *vhwdev, void *chip_node)
{
struct card_node *chip_info = (struct card_node *)chip_node;
struct dbgtool_k_glb_info *dbgtool_info;
struct hifc_hwdev *hwdev = vhwdev;
int ret = 0;
int id;
if (hifc_func_type(hwdev) == TYPE_VF)
return 0;
ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
if (ret) {
pr_err("Failed to sysfs create file\n");
return ret;
}
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = hwdev;
hifc_comm_recv_mgmt_self_cmd_reg(hwdev, HIFC_SELF_CMD_UP2PF_FFM,
ffm_intr_msg_record);
if (chip_info->dbgtool_info) {
chip_info->func_num++;
return 0;
}
dbgtool_info = (struct dbgtool_k_glb_info *)
kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL);
if (!dbgtool_info) {
pr_err("Failed to allocate dbgtool_info\n");
ret = -ENOMEM;
goto dbgtool_info_fail;
}
chip_info->dbgtool_info = dbgtool_info;
/* FFM init */
dbgtool_info->ffm = (struct ffm_record_info *)
kzalloc(sizeof(struct ffm_record_info),
GFP_KERNEL);
if (!dbgtool_info->ffm) {
pr_err("Failed to allocate cell contexts for a chain\n");
ret = -ENOMEM;
goto dbgtool_info_ffm_fail;
}
sema_init(&dbgtool_info->dbgtool_sem, 1);
ret = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id);
if (ret < 0) {
pr_err("Failed to get hifc id\n");
goto sscanf_chdev_fail;
}
g_card_node_array[id] = chip_info;
chip_info->func_num++;
if (g_dbgtool_init_flag) {
g_dbgtool_ref_cnt++;
/* already initialized */
return 0;
}
ret = dbgtool_create_cdev();
if (ret)
goto alloc_chdev_fail;
g_dbgtool_init_flag = 1;
g_dbgtool_ref_cnt = 1;
mutex_init(&g_hifc_addr_lock);
return 0;
alloc_chdev_fail:
g_card_node_array[id] = NULL;
sscanf_chdev_fail:
kfree(dbgtool_info->ffm);
dbgtool_info_ffm_fail:
kfree(dbgtool_info);
dbgtool_info = NULL;
chip_info->dbgtool_info = NULL;
dbgtool_info_fail:
hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
return ret;
}
/**
* hifc_dbgtool_knl_deinit - dbgtool character device deinit
* @vhwdev: the pointer to hardware device
* @chip_node: the pointer to card node
*/
void hifc_dbgtool_knl_deinit(void *vhwdev, void *chip_node)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct card_node *chip_info = (struct card_node *)chip_node;
int id;
int err;
struct hifc_hwdev *hwdev = vhwdev;
if (hifc_func_type(hwdev) == TYPE_VF)
return;
hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
chip_info->func_num--;
if (chip_info->func_num)
return;
err = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id);
if (err < 0)
pr_err("Failed to get hifc id\n");
g_card_node_array[id] = NULL;
dbgtool_info = chip_info->dbgtool_info;
/* FFM deinit */
kfree(dbgtool_info->ffm);
dbgtool_info->ffm = NULL;
kfree(dbgtool_info);
chip_info->dbgtool_info = NULL;
(void)hifc_dbgtool_knl_free_mem(id);
if (g_dbgtool_init_flag) {
if ((--g_dbgtool_ref_cnt))
return;
}
if (!dbgtool_d_class)
return;
device_destroy(dbgtool_d_class, dbgtool_dev_id);
class_destroy(dbgtool_d_class);
dbgtool_d_class = NULL;
cdev_del(&(dbgtool_chr_dev));
unregister_chrdev_region(dbgtool_dev_id, 1);
g_dbgtool_init_flag = 0;
}
/*lint -restore*/

107
hifc/hifc_dbgtool_knl.h Normal file
View File

@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __DBGTOOL_KNL_H__
#define __DBGTOOL_KNL_H__
enum dbg_tool_cmd {
DBGTOOL_CMD_API_RD = 0,
DBGTOOL_CMD_API_WR,
DBGTOOL_CMD_FFM_RD,
DBGTOOL_CMD_FFM_CLR,
DBGTOOL_CMD_PF_DEV_INFO_GET,
DBGTOOL_CMD_MSG_2_UP,
DBGTOOL_CMD_FREE_MEM,
DBGTOOL_CMD_NUM
};
struct api_cmd_rd {
u32 pf_id;
u8 dest;
u8 *cmd;
u16 size;
void *ack;
u16 ack_size;
};
struct api_cmd_wr {
u32 pf_id;
u8 dest;
u8 *cmd;
u16 size;
};
struct pf_dev_info {
u64 bar0_size;
u8 bus;
u8 slot;
u8 func;
u64 phy_addr;
};
/* Maximum number of interrupt records; interrupts are recorded in the FFM */
#define FFM_RECORD_NUM_MAX 64
struct ffm_intr_tm_info {
u8 node_id;
/* error level of the interrupt source */
u8 err_level;
/* Classification by interrupt source properties */
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
u8 sec; /* second*/
u8 min; /* minute */
u8 hour; /* hour */
u8 mday; /* day */
u8 mon; /* month */
u16 year; /* year */
};
struct ffm_record_info {
u32 ffm_num;
struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX];
};
struct msg_2_up {
u8 pf_id; /* which pf sends messages to the up */
u8 mod;
u8 cmd;
void *buf_in;
u16 in_size;
void *buf_out;
u16 *out_size;
};
struct dbgtool_param {
union {
struct api_cmd_rd api_rd;
struct api_cmd_wr api_wr;
struct pf_dev_info *dev_info;
struct ffm_record_info *ffm_rd;
struct msg_2_up msg2up;
} param;
char chip_name[16];
};
#ifndef MAX_CARD_NUM
#define MAX_CARD_NUM 64
#endif
#define DBGTOOL_PAGE_ORDER 10
int hifc_dbgtool_knl_init(void *vhwdev, void *chip_node);
void hifc_dbgtool_knl_deinit(void *vhwdev, void *chip_node);
int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma);
void hifc_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id,
void **g_func_handle_array);
long hifc_dbgtool_knl_free_mem(int id);
#endif

1346
hifc/hifc_eqs.c Normal file

File diff suppressed because it is too large Load Diff

233
hifc/hifc_eqs.h Normal file
View File

@ -0,0 +1,233 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HIFC_EQS_H
#define HIFC_EQS_H
#define HIFC_MAX_AEQS 3
#define HIFC_MAX_CEQS 32
#define HIFC_EQ_MAX_PAGES 8
#define HIFC_AEQE_SIZE 64
#define HIFC_CEQE_SIZE 4
#define HIFC_AEQE_DESC_SIZE 4
#define HIFC_AEQE_DATA_SIZE \
(HIFC_AEQE_SIZE - HIFC_AEQE_DESC_SIZE)
#define HIFC_DEFAULT_AEQ_LEN 4096
#define HIFC_DEFAULT_CEQ_LEN 8192
#define HIFC_MIN_AEQ_LEN 64
#define HIFC_MAX_AEQ_LEN (512 * 1024)
#define HIFC_MIN_CEQ_LEN 64
#define HIFC_MAX_CEQ_LEN (1024 * 1024)
#define HIFC_CEQ_ID_CMDQ 0
#define EQ_IRQ_NAME_LEN 64
/* EQ registers */
#define HIFC_AEQ_MTT_OFF_BASE_ADDR 0x200
#define HIFC_CEQ_MTT_OFF_BASE_ADDR 0x400
#define HIFC_EQ_MTT_OFF_STRIDE 0x40
#define HIFC_CSR_AEQ_MTT_OFF(id) \
(HIFC_AEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE)
#define HIFC_CSR_CEQ_MTT_OFF(id) \
(HIFC_CEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE)
#define HIFC_CSR_EQ_PAGE_OFF_STRIDE 8
#define HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
(HIFC_CSR_AEQ_MTT_OFF(q_id) + \
(pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE)
#define HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
(HIFC_CSR_AEQ_MTT_OFF(q_id) + \
(pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4)
#define HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
(HIFC_CSR_CEQ_MTT_OFF(q_id) + \
(pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE)
#define HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
(HIFC_CSR_CEQ_MTT_OFF(q_id) + \
(pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4)
#define HIFC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \
((u32)((type == HIFC_AEQ) ? \
HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \
HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num)))
#define HIFC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \
((u32)((type == HIFC_AEQ) ? \
HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \
HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num)))
#define HIFC_AEQ_CTRL_0_ADDR_BASE 0xE00
#define HIFC_AEQ_CTRL_1_ADDR_BASE 0xE04
#define HIFC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08
#define HIFC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C
#define HIFC_EQ_OFF_STRIDE 0x80
#define HIFC_CSR_AEQ_CTRL_0_ADDR(idx) \
(HIFC_AEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_AEQ_CTRL_1_ADDR(idx) \
(HIFC_AEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_AEQ_CONS_IDX_ADDR(idx) \
(HIFC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_AEQ_PROD_IDX_ADDR(idx) \
(HIFC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CEQ_CTRL_0_ADDR_BASE 0x1000
#define HIFC_CEQ_CTRL_1_ADDR_BASE 0x1004
#define HIFC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008
#define HIFC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C
#define HIFC_CSR_CEQ_CTRL_0_ADDR(idx) \
(HIFC_CEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_CEQ_CTRL_1_ADDR(idx) \
(HIFC_CEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_CEQ_CONS_IDX_ADDR(idx) \
(HIFC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
#define HIFC_CSR_CEQ_PROD_IDX_ADDR(idx) \
(HIFC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE)
enum hifc_eq_type {
HIFC_AEQ,
HIFC_CEQ
};
enum hifc_eq_intr_mode {
HIFC_INTR_MODE_ARMED,
HIFC_INTR_MODE_ALWAYS,
};
enum hifc_eq_ci_arm_state {
HIFC_EQ_NOT_ARMED,
HIFC_EQ_ARMED,
};
struct hifc_eq_work {
struct work_struct work;
void *data;
};
struct hifc_ceq_tasklet_data {
void *data;
};
struct hifc_eq {
struct hifc_hwdev *hwdev;
u16 q_id;
enum hifc_eq_type type;
u32 page_size;
u32 orig_page_size;
u32 eq_len;
u32 cons_idx;
u16 wrapped;
u16 elem_size;
u16 num_pages;
u32 num_elem_in_pg;
struct irq_info eq_irq;
char irq_name[EQ_IRQ_NAME_LEN];
dma_addr_t *dma_addr;
u8 **virt_addr;
dma_addr_t *dma_addr_for_free;
u8 **virt_addr_for_free;
struct hifc_eq_work aeq_work;
struct tasklet_struct ceq_tasklet;
struct hifc_ceq_tasklet_data ceq_tasklet_data;
u64 hard_intr_jif;
u64 soft_intr_jif;
};
struct hifc_aeq_elem {
u8 aeqe_data[HIFC_AEQE_DATA_SIZE];
u32 desc;
};
enum hifc_aeq_cb_state {
HIFC_AEQ_HW_CB_REG = 0,
HIFC_AEQ_HW_CB_RUNNING,
HIFC_AEQ_SW_CB_REG,
HIFC_AEQ_SW_CB_RUNNING,
};
struct hifc_aeqs {
struct hifc_hwdev *hwdev;
hifc_aeq_hwe_cb aeq_hwe_cb[HIFC_MAX_AEQ_EVENTS];
hifc_aeq_swe_cb aeq_swe_cb[HIFC_MAX_AEQ_SW_EVENTS];
unsigned long aeq_hw_cb_state[HIFC_MAX_AEQ_EVENTS];
unsigned long aeq_sw_cb_state[HIFC_MAX_AEQ_SW_EVENTS];
struct hifc_eq aeq[HIFC_MAX_AEQS];
u16 num_aeqs;
struct workqueue_struct *workq;
};
enum hifc_ceq_cb_state {
HIFC_CEQ_CB_REG = 0,
HIFC_CEQ_CB_RUNNING,
};
struct hifc_ceqs {
struct hifc_hwdev *hwdev;
hifc_ceq_event_cb ceq_cb[HIFC_MAX_CEQ_EVENTS];
void *ceq_data[HIFC_MAX_CEQ_EVENTS];
unsigned long ceq_cb_state[HIFC_MAX_CEQ_EVENTS];
struct hifc_eq ceq[HIFC_MAX_CEQS];
u16 num_ceqs;
};
int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs,
struct irq_info *msix_entries);
void hifc_aeqs_free(struct hifc_hwdev *hwdev);
int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs,
struct irq_info *msix_entries);
void hifc_ceqs_free(struct hifc_hwdev *hwdev);
void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
u16 *num_irqs);
void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs,
u16 *num_irqs);
void hifc_dump_aeq_info(struct hifc_hwdev *hwdev);
#endif

1627
hifc/hifc_hba.c Normal file

File diff suppressed because it is too large

234
hifc/hifc_hba.h Normal file
View File

@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_HBA_H__
#define __HIFC_HBA_H__
#include "unf_common.h"
#include "hifc_queue.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#define HIFC_PCI_VENDOR_ID_MASK (0xffff)
#define HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT 8
#define HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_BB_SCN 0
#define HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081
#define HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100
#define HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE 7000
#define HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE 0x2000
#define HIFC_SMARTIO_WORK_MODE_FC 0x1
#define UNF_FUN_ID_MASK 0x07
#define UNF_HIFC_FC 0x01
#define UNF_HIFC_MAXNPIV_NUM 64
#define HIFC_MAX_COS_NUM 8
#define HIFC_PCI_VENDOR_ID_HUAWEI 0x19e5
#define HIFC_SCQ_CNTX_SIZE 32
#define HIFC_SRQ_CNTX_SIZE 64
#define HIFC_PORT_INIT_TIME_SEC_MAX 1
#define HIFC_PORT_NAME_LABEL "hifc"
#define HIFC_PORT_NAME_STR_LEN 16
#define HIFC_MAX_PROBE_PORT_NUM 64
#define HIFC_PORT_NUM_PER_TABLE 64
#define HIFC_MAX_CARD_NUM 32
#define HIFC_HBA_PORT_MAX_NUM HIFC_MAX_PROBE_PORT_NUM
/* Heartbeat lost flag */
#define HIFC_EVENT_HEART_LOST 0
#define HIFC_GET_HBA_PORT_ID(__hba) ((__hba)->port_index)
#define HIFC_HBA_NOT_PRESENT(__hba) ((__hba)->dev_present == UNF_FALSE)
struct hifc_port_cfg_s {
unsigned int port_id; /* Port ID */
unsigned int port_mode; /* Port mode: INI(0x20), TGT(0x10), BOTH(0x30) */
unsigned int port_topology; /* Port topo: 0x3 loop, 0xc p2p, 0xf auto */
unsigned int port_alpa; /* Port ALPA */
unsigned int max_queue_depth; /* Max queue depth registered with SCSI */
unsigned int sest_num; /* IO burst num: 512-4096 */
unsigned int max_login; /* Max login sessions */
unsigned int node_name_hi; /* node name high 32 bits */
unsigned int node_name_lo; /* node name low 32 bits */
unsigned int port_name_hi; /* port name high 32 bits */
unsigned int port_name_lo; /* port name low 32 bits */
/* Port speed: 0 auto, 4 4Gbps, 8 8Gbps, 16 16Gbps */
unsigned int port_speed;
unsigned int interrupt_delay; /* Interrupt delay (ms) */
unsigned int tape_support; /* tape support */
};
#define HIFC_VER_INFO_SIZE 128
struct hifc_drv_version_s {
char ver[HIFC_VER_INFO_SIZE];
};
struct hifc_card_info_s {
unsigned int card_num : 8;
unsigned int func_num : 8;
unsigned int base_func : 8;
/*
 * Card type: UNF_FC_SERVER_BOARD_32_G(6) for 32G mode,
 * UNF_FC_SERVER_BOARD_16_G(7) for 16G mode
 */
unsigned int card_type : 8;
};
struct hifc_card_num_manage_s {
int is_removing;
unsigned int port_count;
unsigned long long card_number;
};
struct hifc_led_state_s {
unsigned char green_speed_led;
unsigned char yellow_speed_led;
unsigned char ac_led;
unsigned char reserved;
};
enum hifc_queue_set_stage_e {
HIFC_QUEUE_SET_STAGE_INIT = 0,
HIFC_QUEUE_SET_STAGE_SCANNING,
HIFC_QUEUE_SET_STAGE_FLUSHING,
HIFC_QUEUE_SET_STAGE_FLUSHDONE,
HIFC_QUEUE_SET_STAGE_BUTT
};
struct hifc_srq_delay_info_s {
unsigned char srq_delay_flag; /* Check whether need to delay */
unsigned char root_rq_rcvd_flag;
unsigned short rsd;
spinlock_t srq_lock;
struct unf_frame_pkg_s pkg;
struct delayed_work del_work;
};
struct hifc_fw_ver_detail_s {
unsigned char ucode_ver[HIFC_VER_LEN];
unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char up_ver[HIFC_VER_LEN];
unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char boot_ver[HIFC_VER_LEN];
unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN];
};
/* get wwpn and wwnn */
struct hifc_chip_info_s {
unsigned char work_mode;
unsigned char tape_support;
unsigned long long wwpn;
unsigned long long wwnn;
};
struct hifc_hba_s {
struct pci_dev *pci_dev;
void *hw_dev_handle;
struct fc_service_cap fc_service_cap;
struct hifc_scq_info_s scq_info[HIFC_TOTAL_SCQ_NUM];
struct hifc_srq_info_s els_srq_info;
/* PCI IO Memory */
void __iomem *bar0;
unsigned int bar0_len;
struct hifc_root_info_s root_info;
struct hifc_parent_queue_mgr_s *parent_queue_mgr;
/* Linked list of the SQ WqePage pool */
struct hifc_sq_wqe_page_pool_s sq_wpg_pool;
enum hifc_queue_set_stage_e q_set_stage;
unsigned int next_clearing_sq;
unsigned int default_sq_id;
/* Port parameters, Obtained through firmware */
unsigned short q_s_max_count;
unsigned char port_type; /* FC Port */
unsigned char port_index; /* Phy Port */
unsigned int default_scqn;
unsigned char chip_type; /* chiptype:Smart or fc */
unsigned char work_mode;
struct hifc_card_info_s card_info;
char port_name[HIFC_PORT_NAME_STR_LEN];
unsigned int probe_index;
unsigned short exit_base;
unsigned short exit_count;
unsigned short image_count;
unsigned char vpid_start;
unsigned char vpid_end;
spinlock_t flush_state_lock;
int in_flushing;
struct hifc_port_cfg_s port_cfg; /* Obtained through Config */
void *lport; /* Used in UNF level */
unsigned char sys_node_name[UNF_WWN_LEN];
unsigned char sys_port_name[UNF_WWN_LEN];
struct completion hba_init_complete;
struct completion mbox_complete;
unsigned short removing;
int sfp_on;
int dev_present;
int heart_status;
spinlock_t hba_lock;
unsigned int port_topo_cfg;
unsigned int port_bbscn_cfg;
unsigned int port_loop_role;
unsigned int port_speed_cfg;
unsigned int max_support_speed;
unsigned char remote_rttov_tag;
unsigned char remote_edtov_tag;
unsigned short compared_bbscn;
unsigned short remote_bbcredit;
unsigned int compared_edtov_val;
unsigned int compared_ratov_val;
enum unf_act_topo_e active_topo;
unsigned int active_port_speed;
unsigned int active_rx_bb_credit;
unsigned int active_bb_scn;
unsigned int phy_link;
unsigned int fcp_conf_cfg;
/* loop */
unsigned char active_al_pa;
unsigned char loop_map_valid;
unsigned char loop_map[UNF_LOOPMAP_COUNT];
unsigned int cos_bit_map;
atomic_t cos_rport_cnt[HIFC_MAX_COS_NUM];
struct hifc_led_state_s led_states;
unsigned int fec_status;
struct workqueue_struct *work_queue;
unsigned long long reset_time;
struct hifc_srq_delay_info_s delay_info;
};
enum drv_port_entity_type_e {
DRV_PORT_ENTITY_TYPE_PHYSICAL = 0,
DRV_PORT_ENTITY_TYPE_VIRTUAL = 1,
DRV_PORT_ENTITY_TYPE_BUTT
};
extern struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM];
extern spinlock_t probe_spin_lock;
extern unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM /
HIFC_PORT_NUM_PER_TABLE];
unsigned int hifc_port_reset(struct hifc_hba_s *v_hba);
void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba);
void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush);
void hifc_get_total_probed_num(unsigned int *v_probe_cnt);
#endif

611
hifc/hifc_hw.h Normal file
View File

@ -0,0 +1,611 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_HW_H_
#define HIFC_HW_H_
#ifndef __BIG_ENDIAN__
#define __BIG_ENDIAN__ 0x4321
#endif
#ifndef __LITTLE_ENDIAN__
#define __LITTLE_ENDIAN__ 0x1234
#endif
enum hifc_mod_type {
HIFC_MOD_COMM = 0, /* HW communication module */
HIFC_MOD_L2NIC = 1, /* L2NIC module*/
HIFC_MOD_FCOE = 6,
HIFC_MOD_CFGM = 7, /* Configuration module */
HIFC_MOD_FC = 10,
HIFC_MOD_HILINK = 14,
HIFC_MOD_HW_MAX = 16, /* hardware max module id */
/* Software module id, for PF/VF and multi-host */
HIFC_MOD_MAX,
};
struct hifc_cmd_buf {
void *buf;
dma_addr_t dma_addr;
u16 size;
};
enum hifc_ack_type {
HIFC_ACK_TYPE_CMDQ,
HIFC_ACK_TYPE_SHARE_CQN,
HIFC_ACK_TYPE_APP_CQN,
HIFC_MOD_ACK_MAX = 15,
};
int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size,
void *buf_out, u16 *out_size, u32 timeout);
/* PF/VF sends a msg to the uP via api cmd and returns immediately */
int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size);
int hifc_api_cmd_write_nack(void *hwdev, u8 dest,
void *cmd, u16 size);
int hifc_api_cmd_read_ack(void *hwdev, u8 dest,
void *cmd, u16 size, void *ack, u16 ack_size);
/* PF/VF sends a cmd to the ucode via cmdq and returns on success.
 * timeout=0 means use the default timeout.
 */
int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type,
enum hifc_mod_type mod, u8 cmd,
struct hifc_cmd_buf *buf_in,
u64 *out_param, u32 timeout);
/* Notes:
 * 1. whether the timeout parameter is needed is still undecided;
 * 2. out_param indicates the status of the command as processed by the
 * microcode.
 */
/* PF/VF sends a cmd to the ucode via cmdq and returns the detailed result.
 * timeout=0 means use the default timeout.
 */
int hifc_cmdq_detail_resp(void *hwdev, enum hifc_ack_type ack_type,
enum hifc_mod_type mod, u8 cmd,
struct hifc_cmd_buf *buf_in,
struct hifc_cmd_buf *buf_out, u32 timeout);
/* PF/VF sends a cmd to the ucode via cmdq and returns immediately */
int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type,
enum hifc_mod_type mod, u8 cmd,
struct hifc_cmd_buf *buf_in);
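/* Usage sketch (illustrative only, not part of the driver): send one command
 * through the cmdq and wait for its direct completion. The command code 0 and
 * the buffer contents are placeholders.
 *
 * struct hifc_cmd_buf *buf = hifc_alloc_cmd_buf(hwdev);
 * u64 out_param = 0;
 *
 * if (buf) {
 *	... fill buf->buf and set buf->size ...
 *	if (!hifc_cmdq_direct_resp(hwdev, HIFC_ACK_TYPE_CMDQ, HIFC_MOD_FC,
 *				   0, buf, &out_param, 0))
 *		... out_param holds the ucode processing status ...
 *	hifc_free_cmd_buf(hwdev, buf);
 * }
 */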
int hifc_ppf_tmr_start(void *hwdev);
int hifc_ppf_tmr_stop(void *hwdev);
enum hifc_ceq_event {
HIFC_CMDQ = 3,
HIFC_MAX_CEQ_EVENTS = 6,
};
typedef void (*hifc_ceq_event_cb)(void *handle, u32 ceqe_data);
int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event,
hifc_ceq_event_cb callback);
void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event);
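/* Usage sketch (illustrative only, not part of the driver): consume cmdq
 * completion events by registering a handler for the HIFC_CMDQ ceq event.
 *
 * static void my_cmdq_ceq_handler(void *handle, u32 ceqe_data)
 * {
 *	... decode ceqe_data; handle is supplied by the eq layer ...
 * }
 *
 * if (!hifc_ceq_register_cb(hwdev, HIFC_CMDQ, my_cmdq_ceq_handler))
 *	... events are delivered until hifc_ceq_unregister_cb(hwdev,
 *	    HIFC_CMDQ) is called ...
 */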
enum hifc_aeq_type {
HIFC_HW_INTER_INT = 0,
HIFC_MBX_FROM_FUNC = 1,
HIFC_MSG_FROM_MGMT_CPU = 2,
HIFC_API_RSP = 3,
HIFC_API_CHAIN_STS = 4,
HIFC_MBX_SEND_RSLT = 5,
HIFC_MAX_AEQ_EVENTS
};
enum hifc_aeq_sw_type {
HIFC_STATELESS_EVENT = 0,
HIFC_STATEFULL_EVENT = 1,
HIFC_MAX_AEQ_SW_EVENTS
};
typedef void (*hifc_aeq_hwe_cb)(void *handle, u8 *data, u8 size);
int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event,
hifc_aeq_hwe_cb hwe_cb);
void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event);
typedef u8 (*hifc_aeq_swe_cb)(void *handle, u8 event, u64 data);
int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event,
hifc_aeq_swe_cb aeq_swe_cb);
void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event);
typedef void (*hifc_mgmt_msg_cb)(void *hwdev, void *pri_handle,
u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size);
int hifc_register_mgmt_msg_cb(void *hwdev,
enum hifc_mod_type mod, void *pri_handle,
hifc_mgmt_msg_cb callback);
void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod);
struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev);
void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *buf);
int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base,
void __iomem **dwqe_base);
void hifc_free_db_addr(void *hwdev, void __iomem *db_base,
void __iomem *dwqe_base);
struct nic_interrupt_info {
u32 lli_set;
u32 interrupt_coalesc_set;
u16 msix_index;
u8 lli_credit_limit;
u8 lli_timer_cfg;
u8 pending_limt;
u8 coalesc_timer_cfg;
u8 resend_timer_cfg;
};
int hifc_get_interrupt_cfg(void *hwdev,
struct nic_interrupt_info *interrupt_info);
int hifc_set_interrupt_cfg(void *hwdev,
struct nic_interrupt_info interrupt_info);
/* The driver code implementation interface */
void hifc_misx_intr_clear_resend_bit(void *hwdev,
u16 msix_idx, u8 clear_resend_en);
struct hifc_sq_attr {
u8 dma_attr_off;
u8 pending_limit;
u8 coalescing_time;
u8 intr_en;
u16 intr_idx;
u32 l2nic_sqn;
u64 ci_dma_base;
};
int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr);
int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz);
int hifc_clean_root_ctxt(void *hwdev);
void hifc_record_pcie_error(void *hwdev);
int hifc_func_rx_tx_flush(void *hwdev);
int hifc_func_tmr_bitmap_set(void *hwdev, bool enable);
struct hifc_init_para {
/* Record the hifc_pcidev or NDIS_Adapter pointer address */
void *adapter_hdl;
/* Record the pcidev or Handler pointer address,
 * e.g. the input parameter of the ioremap interface
 */
void *pcidev_hdl;
/* Record the pcidev->dev or Handler pointer address, used for
 * DMA address allocation and dev_err printing
 */
void *dev_hdl;
void *cfg_reg_base; /* config space virtual address, bar0/1 */
/* interrupt configuration register address, bar2/3 */
void *intr_reg_base;
u64 db_base_phy;
void *db_base; /* doorbell address, upper 4M of the bar4/5 space */
void *dwqe_mapping; /* direct wqe 4M, follows the doorbell address space */
void **hwdev;
void *chip_node;
/* On a bmgw x86 host the driver can't send messages to the mgmt cpu
 * directly; they need to be transmitted via the ppf mbox to the bmgw
 * arm host.
 */
void *ppf_hwdev;
};
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
#define MAX_FUNCTION_NUM 512
#define HIFC_MAX_PF_NUM 16
#define HIFC_MAX_COS 8
#define INIT_FAILED 0
#define INIT_SUCCESS 1
#define MAX_DRV_BUF_SIZE 4096
struct hifc_cmd_get_light_module_abs {
u8 status;
u8 version;
u8 rsvd0[6];
u8 port_id;
u8 abs_status; /* 0:present, 1:absent */
u8 rsv[2];
};
#define SFP_INFO_MAX_SIZE 512
struct hifc_cmd_get_sfp_qsfp_info {
u8 status;
u8 version;
u8 rsvd0[6];
u8 port_id;
u8 wire_type;
u16 out_len;
u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE];
};
#define HIFC_MAX_PORT_ID 4
struct hifc_port_routine_cmd {
bool up_send_sfp_info;
bool up_send_sfp_abs;
struct hifc_cmd_get_sfp_qsfp_info sfp_info;
struct hifc_cmd_get_light_module_abs abs;
};
struct card_node {
struct list_head node;
struct list_head func_list;
char chip_name[IFNAMSIZ];
void *log_info;
void *dbgtool_info;
void *func_handle_array[MAX_FUNCTION_NUM];
unsigned char dp_bus_num;
u8 func_num;
struct attribute dbgtool_attr_file;
bool cos_up_setted;
u8 cos_up[HIFC_MAX_COS];
bool ppf_state;
u8 pf_bus_num[HIFC_MAX_PF_NUM];
struct hifc_port_routine_cmd rt_cmd[HIFC_MAX_PORT_ID];
/* mutex used for copy sfp info */
struct mutex sfp_mutex;
};
enum hifc_hwdev_init_state {
HIFC_HWDEV_NONE_INITED = 0,
HIFC_HWDEV_CLP_INITED,
HIFC_HWDEV_AEQ_INITED,
HIFC_HWDEV_MGMT_INITED,
HIFC_HWDEV_MBOX_INITED,
HIFC_HWDEV_CMDQ_INITED,
HIFC_HWDEV_COMM_CH_INITED,
HIFC_HWDEV_ALL_INITED,
HIFC_HWDEV_MAX_INVAL_INITED
};
enum hifc_func_cap {
/* send message to mgmt cpu directly */
HIFC_FUNC_MGMT = 1 << 0,
/* setting port attribute, pause/speed etc. */
HIFC_FUNC_PORT = 1 << 1,
/* Enable SR-IOV by default */
HIFC_FUNC_SRIOV_EN_DFLT = 1 << 2,
/* Can't change VF num */
HIFC_FUNC_SRIOV_NUM_FIX = 1 << 3,
/* Force pf/vf link up */
HIFC_FUNC_FORCE_LINK_UP = 1 << 4,
/* Support rate limit */
HIFC_FUNC_SUPP_RATE_LIMIT = 1 << 5,
HIFC_FUNC_SUPP_DFX_REG = 1 << 6,
/* Support promisc/multicast/all-multi */
HIFC_FUNC_SUPP_RX_MODE = 1 << 7,
/* Set vf mac and vlan by ip link */
HIFC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8,
/* Support set mac by ifconfig */
HIFC_FUNC_SUPP_CHANGE_MAC = 1 << 9,
/* OVS doesn't support SCTP_CRC/HW_VLAN/LRO */
HIFC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10,
};
#define FUNC_SUPPORT_MGMT(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_MGMT))
#define FUNC_SUPPORT_PORT_SETTING(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_PORT))
#define FUNC_SUPPORT_DCB(hwdev) \
(FUNC_SUPPORT_PORT_SETTING(hwdev))
#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SRIOV_EN_DFLT))
#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SRIOV_NUM_FIX))
#define FUNC_SUPPORT_RX_MODE(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SUPP_RX_MODE))
#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SUPP_RATE_LIMIT))
#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SUPP_SET_VF_MAC_VLAN))
#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_SUPP_CHANGE_MAC))
#define FUNC_FORCE_LINK_UP(hwdev) \
(!!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_FORCE_LINK_UP))
#define FUNC_SUPPORT_SCTP_CRC(hwdev) \
(!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
#define FUNC_SUPPORT_HW_VLAN(hwdev) \
(!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
#define FUNC_SUPPORT_LRO(hwdev) \
(!(hifc_get_func_feature_cap(hwdev) & \
HIFC_FUNC_OFFLOAD_OVS_UNSUPP))
int hifc_init_hwdev(struct hifc_init_para *para);
void hifc_free_hwdev(void *hwdev);
int hifc_stateful_init(void *hwdev);
void hifc_stateful_deinit(void *hwdev);
bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state);
u64 hifc_get_func_feature_cap(void *hwdev);
int hifc_slq_init(void *dev, int num_wqs);
void hifc_slq_uninit(void *dev);
int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth,
u16 page_size, u64 *cla_addr, void **handle);
void hifc_slq_free(void *dev, void *handle);
u64 hifc_slq_get_addr(void *handle, u16 index);
u64 hifc_slq_get_first_pageaddr(void *handle);
typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
u16 in_size, void *buf_out,
u16 *out_size);
void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
comm_up_self_msg_proc proc);
void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd);
/* defined by chip */
enum hifc_fault_type {
FAULT_TYPE_CHIP,
FAULT_TYPE_UCODE,
FAULT_TYPE_MEM_RD_TIMEOUT,
FAULT_TYPE_MEM_WR_TIMEOUT,
FAULT_TYPE_REG_RD_TIMEOUT,
FAULT_TYPE_REG_WR_TIMEOUT,
FAULT_TYPE_PHY_FAULT,
FAULT_TYPE_MAX,
};
/* defined by chip */
enum hifc_fault_err_level {
/* default err_level=FAULT_LEVEL_FATAL if
* type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
* FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
* FAULT_TYPE_UCODE
* other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
*/
FAULT_LEVEL_FATAL,
FAULT_LEVEL_SERIOUS_RESET,
FAULT_LEVEL_SERIOUS_FLR,
FAULT_LEVEL_GENERAL,
FAULT_LEVEL_SUGGESTION,
FAULT_LEVEL_MAX
};
enum hifc_fault_source_type {
/* same as FAULT_TYPE_CHIP */
HIFC_FAULT_SRC_HW_MGMT_CHIP = 0,
/* same as FAULT_TYPE_UCODE */
HIFC_FAULT_SRC_HW_MGMT_UCODE,
/* same as FAULT_TYPE_MEM_RD_TIMEOUT */
HIFC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT,
/* same as FAULT_TYPE_MEM_WR_TIMEOUT */
HIFC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT,
/* same as FAULT_TYPE_REG_RD_TIMEOUT */
HIFC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT,
/* same as FAULT_TYPE_REG_WR_TIMEOUT */
HIFC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT,
HIFC_FAULT_SRC_SW_MGMT_UCODE,
HIFC_FAULT_SRC_MGMT_WATCHDOG,
HIFC_FAULT_SRC_MGMT_RESET = 8,
HIFC_FAULT_SRC_HW_PHY_FAULT,
HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20,
HIFC_FAULT_SRC_TYPE_MAX,
};
struct hifc_fault_sw_mgmt {
u8 event_id;
u64 event_data;
};
union hifc_fault_hw_mgmt {
u32 val[4];
/* valid only type==FAULT_TYPE_CHIP */
struct {
u8 node_id;
/* enum hifc_fault_err_level */
u8 err_level;
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
/* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR
*/
u16 func_id;
u16 rsvd2;
} chip;
/* valid only type==FAULT_TYPE_UCODE */
struct {
u8 cause_id;
u8 core_id;
u8 c_id;
u8 rsvd3;
u32 epc;
u32 rsvd4;
u32 rsvd5;
} ucode;
/* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
* FAULT_TYPE_MEM_WR_TIMEOUT
*/
struct {
u32 err_csr_ctrl;
u32 err_csr_data;
u32 ctrl_tab;
u32 mem_index;
} mem_timeout;
/* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
* FAULT_TYPE_REG_WR_TIMEOUT
*/
struct {
u32 err_csr;
u32 rsvd6;
u32 rsvd7;
u32 rsvd8;
} reg_timeout;
struct {
/* 0: read; 1: write */
u8 op_type;
u8 port_id;
u8 dev_ad;
u8 rsvd9;
u32 csr_addr;
u32 op_data;
u32 rsvd10;
} phy_fault;
};
/* defined by chip */
struct hifc_fault_event {
/* enum hifc_fault_type */
u8 type;
u8 rsvd0[3];
union hifc_fault_hw_mgmt event;
};
struct hifc_fault_recover_info {
u8 fault_src; /* enum hifc_fault_source_type */
u8 fault_lev; /* enum hifc_fault_err_level */
u8 rsvd0[2];
union {
union hifc_fault_hw_mgmt hw_mgmt;
struct hifc_fault_sw_mgmt sw_mgmt;
u32 mgmt_rsvd[4];
u32 host_rsvd[4];
} fault_data;
};
struct hifc_dcb_state {
u8 dcb_on;
u8 default_cos;
u8 up_cos[8];
};
enum link_err_type {
LINK_ERR_MODULE_UNRECOGENIZED,
LINK_ERR_NUM,
};
enum port_module_event_type {
HIFC_PORT_MODULE_CABLE_PLUGGED,
HIFC_PORT_MODULE_CABLE_UNPLUGGED,
HIFC_PORT_MODULE_LINK_ERR,
HIFC_PORT_MODULE_MAX_EVENT,
};
struct hifc_port_module_event {
enum port_module_event_type type;
enum link_err_type err_type;
};
struct hifc_event_link_info {
u8 valid;
u8 port_type;
u8 autoneg_cap;
u8 autoneg_state;
u8 duplex;
u8 speed;
};
struct hifc_mctp_host_info {
u8 major_cmd;
u8 sub_cmd;
u8 rsvd[2];
u32 data_len;
void *data;
};
enum hifc_event_type {
HIFC_EVENT_LINK_DOWN = 0,
HIFC_EVENT_LINK_UP = 1,
HIFC_EVENT_HEART_LOST = 2,
HIFC_EVENT_FAULT = 3,
HIFC_EVENT_NOTIFY_VF_DCB_STATE = 4,
HIFC_EVENT_DCB_STATE_CHANGE = 5,
HIFC_EVENT_FMW_ACT_NTC = 6,
HIFC_EVENT_PORT_MODULE_EVENT = 7,
HIFC_EVENT_MCTP_GET_HOST_INFO,
HIFC_EVENT_MULTI_HOST_MGMT,
HIFC_EVENT_INIT_MIGRATE_PF,
};
struct hifc_event_info {
enum hifc_event_type type;
union {
struct hifc_event_link_info link_info;
struct hifc_fault_event info;
struct hifc_dcb_state dcb_state;
struct hifc_port_module_event module_event;
u8 vf_default_cos;
struct hifc_mctp_host_info mctp_info;
};
};
enum hifc_ucode_event_type {
HIFC_INTERNAL_TSO_FATAL_ERROR = 0x0,
HIFC_INTERNAL_LRO_FATAL_ERROR = 0x1,
HIFC_INTERNAL_TX_FATAL_ERROR = 0x2,
HIFC_INTERNAL_RX_FATAL_ERROR = 0x3,
HIFC_INTERNAL_OTHER_FATAL_ERROR = 0x4,
HIFC_NIC_FATAL_ERROR_MAX = 0x8,
};
typedef void (*hifc_event_handler)(void *handle,
struct hifc_event_info *event);
/* only register once */
void hifc_event_register(void *dev, void *pri_handle,
hifc_event_handler callback);
void hifc_event_unregister(void *dev);
void hifc_detect_hw_present(void *hwdev);
void hifc_set_chip_absent(void *hwdev);
int hifc_get_chip_present_flag(void *hwdev);
void hifc_set_pcie_order_cfg(void *handle);
int hifc_get_mgmt_channel_status(void *handle);
struct hifc_board_info {
u32 board_type;
u32 port_num;
u32 port_speed;
u32 pcie_width;
u32 host_num;
u32 pf_num;
u32 vf_total_num;
u32 tile_num;
u32 qcm_num;
u32 core_num;
u32 work_mode;
u32 service_mode;
u32 pcie_mode;
u32 cfg_addr;
u32 boot_sel;
u32 board_id;
};
int hifc_get_board_info(void *hwdev, struct hifc_board_info *info);
int hifc_get_card_present_state(void *hwdev, bool *card_present_state);
#endif

3674
hifc/hifc_hwdev.c Normal file

File diff suppressed because it is too large

456
hifc/hifc_hwdev.h Normal file
View File

@ -0,0 +1,456 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_HWDEV_H_
#define HIFC_HWDEV_H_
/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */
#define HIFC_DEFAULT_WQ_PAGE_SIZE 0x40000
#define HIFC_HW_WQ_PAGE_SIZE 0x1000
#define HIFC_MSG_TO_MGMT_MAX_LEN 2016
#define HIFC_MGMT_STATUS_ERR_OK 0 /* Ok */
#define HIFC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */
#define HIFC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */
#define HIFC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */
#define HIFC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */
#define HIFC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */
#define HIFC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */
#define HIFC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */
#define HIFC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */
#define HIFC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */
#define HIFC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */
#define HIFC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */
#define HIFC_MGMT_STATUS_ERR_FULL 12 /* Table full */
#define HIFC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */
#define HIFC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */
#define HIFC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */
#define HIFC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */
#define HIFC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */
#define HIFC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */
#define HIFC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */
#define HIFC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */
#define HIFC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */
#define HIFC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported */
/* QE buffer related defines */
enum hifc_rx_buf_size {
HIFC_RX_BUF_SIZE_32B = 0x20,
HIFC_RX_BUF_SIZE_64B = 0x40,
HIFC_RX_BUF_SIZE_96B = 0x60,
HIFC_RX_BUF_SIZE_128B = 0x80,
HIFC_RX_BUF_SIZE_192B = 0xC0,
HIFC_RX_BUF_SIZE_256B = 0x100,
HIFC_RX_BUF_SIZE_384B = 0x180,
HIFC_RX_BUF_SIZE_512B = 0x200,
HIFC_RX_BUF_SIZE_768B = 0x300,
HIFC_RX_BUF_SIZE_1K = 0x400,
HIFC_RX_BUF_SIZE_1_5K = 0x600,
HIFC_RX_BUF_SIZE_2K = 0x800,
HIFC_RX_BUF_SIZE_3K = 0xC00,
HIFC_RX_BUF_SIZE_4K = 0x1000,
HIFC_RX_BUF_SIZE_8K = 0x2000,
HIFC_RX_BUF_SIZE_16K = 0x4000,
};
enum hifc_res_state {
HIFC_RES_CLEAN = 0,
HIFC_RES_ACTIVE = 1,
};
enum ppf_tmr_status {
HIFC_PPF_TMR_FLAG_STOP,
HIFC_PPF_TMR_FLAG_START,
};
struct cfg_mgmt_info;
struct hifc_hwif;
struct hifc_wqs;
struct hifc_aeqs;
struct hifc_ceqs;
struct hifc_msg_pf_to_mgmt;
struct hifc_cmdqs;
struct hifc_root_ctxt {
u8 status;
u8 version;
u8 rsvd0[6];
u16 func_idx;
u16 rsvd1;
u8 set_cmdq_depth;
u8 cmdq_depth;
u8 lro_en;
u8 rsvd2;
u8 ppf_idx;
u8 rsvd3;
u16 rq_depth;
u16 rx_buf_sz;
u16 sq_depth;
};
struct hifc_page_addr {
void *virt_addr;
u64 phys_addr;
};
#define HIFC_PCIE_LINK_DOWN 0xFFFFFFFF
#define HIFC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000)
#define HIFC_DEV_BUSY_ACTIVE_FW 0xFE
#define HIFC_HW_WQ_NAME "hifc_hardware"
#define HIFC_HEARTBEAT_PERIOD 1000
#define HIFC_HEARTBEAT_START_EXPIRE 5000
#define HIFC_CHIP_ERROR_TYPE_MAX 1024
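/* 22 (HIFC_NODE_ID_MAX) * 5 (FAULT_LEVEL_MAX) * 1024 = 112640 */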
#define HIFC_CHIP_FAULT_SIZE \
(HIFC_NODE_ID_MAX * FAULT_LEVEL_MAX * HIFC_CHIP_ERROR_TYPE_MAX)
#define HIFC_CSR_DMA_ATTR_TBL_BASE 0xC80
#define HIFC_CSR_DMA_ATTR_TBL_STRIDE 0x4
#define HIFC_CSR_DMA_ATTR_TBL_ADDR(idx) \
(HIFC_CSR_DMA_ATTR_TBL_BASE \
+ (idx) * HIFC_CSR_DMA_ATTR_TBL_STRIDE)
/* MSI-X registers */
#define HIFC_CSR_MSIX_CNT_BASE 0x2004
#define HIFC_CSR_MSIX_STRIDE 0x8
#define HIFC_CSR_MSIX_CNT_ADDR(idx) \
(HIFC_CSR_MSIX_CNT_BASE + (idx) * HIFC_CSR_MSIX_STRIDE)
enum hifc_node_id {
HIFC_NODE_ID_IPSU = 4,
HIFC_NODE_ID_MGMT_HOST = 21, /* Host CPU sends API to uP */
HIFC_NODE_ID_MAX = 22
};
#define HIFC_HWDEV_INIT_MODES_MASK ((1UL << HIFC_HWDEV_ALL_INITED) - 1)
enum hifc_hwdev_func_state {
HIFC_HWDEV_FUNC_INITED = HIFC_HWDEV_ALL_INITED,
HIFC_HWDEV_FUNC_DEINIT,
HIFC_HWDEV_STATE_BUSY = 31,
};
struct hifc_cqm_stats {
atomic_t cqm_cmd_alloc_cnt;
atomic_t cqm_cmd_free_cnt;
atomic_t cqm_send_cmd_box_cnt;
atomic_t cqm_db_addr_alloc_cnt;
atomic_t cqm_db_addr_free_cnt;
atomic_t cqm_fc_srq_create_cnt;
atomic_t cqm_qpc_mpt_create_cnt;
atomic_t cqm_nonrdma_queue_create_cnt;
atomic_t cqm_qpc_mpt_delete_cnt;
atomic_t cqm_nonrdma_queue_delete_cnt;
atomic_t cqm_aeq_callback_cnt[112];
};
struct hifc_link_event_stats {
atomic_t link_down_stats;
atomic_t link_up_stats;
};
struct hifc_fault_event_stats {
atomic_t chip_fault_stats[HIFC_NODE_ID_MAX][FAULT_LEVEL_MAX];
atomic_t fault_type_stat[FAULT_TYPE_MAX];
atomic_t pcie_fault_stats;
};
struct hifc_hw_stats {
atomic_t heart_lost_stats;
atomic_t nic_ucode_event_stats[HIFC_NIC_FATAL_ERROR_MAX];
struct hifc_cqm_stats cqm_stats;
struct hifc_link_event_stats link_event_stats;
struct hifc_fault_event_stats fault_event_stats;
};
struct hifc_fault_info_node {
struct list_head list;
struct hifc_hwdev *hwdev;
struct hifc_fault_recover_info info;
};
enum heartbeat_support_state {
HEARTBEAT_NOT_SUPPORT = 0,
HEARTBEAT_SUPPORT,
};
/* 25s allows for at most 5 lost heartbeat events */
#define HIFC_HEARBEAT_ENHANCED_LOST 25000
struct hifc_heartbeat_enhanced {
bool en; /* enable enhanced heartbeat or not */
unsigned long last_update_jiffies;
u32 last_heartbeat;
unsigned long start_detect_jiffies;
};
#define HIFC_CMD_VER_FUNC_ID 2
#define HIFC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C
#define HIFC_ICPL_RESERVD_ADDR 0x9204
#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\
hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_L2NIC, cmd, \
buf_in, in_size, \
buf_out, out_size, 0)
struct hifc_hwdev {
void *adapter_hdl; /* pointer to hifc_pcidev or NDIS_Adapter */
void *pcidev_hdl; /* pointer to pcidev or Handler */
void *dev_hdl; /* pointer to pcidev->dev or Handler, for
* sdk_err() or dma_alloc()
*/
u32 wq_page_size;
void *cqm_hdl;
void *chip_node;
struct hifc_hwif *hwif; /* include void __iomem *bar */
struct cfg_mgmt_info *cfg_mgmt;
struct hifc_wqs *wqs; /* for FC slq */
struct hifc_aeqs *aeqs;
struct hifc_ceqs *ceqs;
struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt;
struct hifc_cmdqs *cmdqs;
struct hifc_page_addr page_pa0;
struct hifc_page_addr page_pa1;
hifc_event_handler event_callback;
void *event_pri_handle;
bool history_fault_flag;
struct hifc_fault_recover_info history_fault;
struct semaphore fault_list_sem;
struct work_struct timer_work;
struct workqueue_struct *workq;
struct timer_list heartbeat_timer;
/* true: heartbeat lost; false: heartbeat restored */
u32 heartbeat_lost;
int chip_present_flag;
struct hifc_heartbeat_enhanced heartbeat_ehd;
struct hifc_hw_stats hw_stats;
u8 *chip_fault_stats;
u32 statufull_ref_cnt;
ulong func_state;
u64 feature_cap; /* enum hifc_func_cap */
/* On a bmgw x86 host the driver can't send messages to the mgmt cpu
 * directly; they need to be transmitted via the ppf mbox to the bmgw
 * arm host.
 */
struct hifc_board_info board_info;
};
int hifc_init_comm_ch(struct hifc_hwdev *hwdev);
void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev);
enum hifc_set_arm_type {
HIFC_SET_ARM_CMDQ,
HIFC_SET_ARM_SQ,
HIFC_SET_ARM_TYPE_NUM,
};
/* uP-to-driver event */
#define HIFC_PORT_CMD_MGMT_RESET 0x0
struct hifc_vport_state {
u8 status;
u8 version;
u8 rsvd0[6];
u16 func_id;
u16 rsvd1;
u8 state;
u8 rsvd2[3];
};
struct hifc_l2nic_reset {
u8 status;
u8 version;
u8 rsvd0[6];
u16 func_id;
u16 reset_flag;
};
/* HILINK module interface */
/* cmd of mgmt CPU message for HILINK module */
enum hifc_hilink_cmd {
HIFC_HILINK_CMD_GET_LINK_INFO = 0x3,
HIFC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
};
enum hilink_info_print_event {
HILINK_EVENT_LINK_UP = 1,
HILINK_EVENT_LINK_DOWN,
HILINK_EVENT_CABLE_PLUGGED,
HILINK_EVENT_MAX_TYPE,
};
enum hifc_link_port_type {
LINK_PORT_FIBRE = 1,
LINK_PORT_ELECTRIC,
LINK_PORT_COPPER,
LINK_PORT_AOC,
LINK_PORT_BACKPLANE,
LINK_PORT_BASET,
LINK_PORT_MAX_TYPE,
};
enum hilink_fibre_subtype {
FIBRE_SUBTYPE_SR = 1,
FIBRE_SUBTYPE_LR,
FIBRE_SUBTYPE_MAX,
};
enum hilink_fec_type {
HILINK_FEC_RSFEC,
HILINK_FEC_BASEFEC,
HILINK_FEC_NOFEC,
HILINK_FEC_MAX_TYPE,
};
/* cmd of mgmt CPU message */
enum hifc_port_cmd {
HIFC_PORT_CMD_SET_MAC = 0x9,
HIFC_PORT_CMD_GET_AUTONEG_CAP = 0xf,
HIFC_PORT_CMD_SET_VPORT_ENABLE = 0x5d,
HIFC_PORT_CMD_UPDATE_MAC = 0xa4,
HIFC_PORT_CMD_GET_SFP_INFO = 0xad,
HIFC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
HIFC_PORT_CMD_GET_SFP_ABS = 0xFB,
};
struct hi30_ffe_data {
u8 PRE2;
u8 PRE1;
u8 POST1;
u8 POST2;
u8 MAIN;
};
struct hi30_ctle_data {
u8 ctlebst[3];
u8 ctlecmband[3];
u8 ctlermband[3];
u8 ctleza[3];
u8 ctlesqh[3];
u8 ctleactgn[3];
u8 ctlepassgn;
};
#define HILINK_MAX_LANE 4
struct hilink_lane {
u8 lane_used;
u8 hi30_ffe[5];
u8 hi30_ctle[19];
u8 hi30_dfe[14];
u8 rsvd4;
};
struct hifc_link_info {
u8 vendor_name[16];
/* port type:
* 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
* 6 - baseT; 0xffff - unknown
*
* port subtype:
* Only when port_type is fiber:
* 1 - SR; 2 - LR
*/
u32 port_type;
u32 port_sub_type;
u32 cable_length;
u8 cable_temp;
u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */
u8 sfp_type; /* 0 - qsfp; 1 - sfp */
u8 rsvd0;
u32 power[4]; /* uW; if it is sfp, only power[2] is valid */
u8 an_state; /* 0 - off; 1 - on */
u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
u16 speed; /* 1(G)/10(G)/25(G)... */
u8 cable_absent; /* 0 - cable present; 1 - cable absent */
u8 alos; /* 0 - yes; 1 - no */
u8 rx_los; /* 0 - yes; 1 - no */
u8 pma_status;
u32 pma_dbg_info_reg; /* pma debug info: */
u32 pma_signal_ok_reg; /* signal ok: */
u32 pcs_err_blk_cnt_reg; /* error block counter: */
u32 rf_lf_status_reg; /* RF/LF status: */
u8 pcs_link_reg; /* pcs link: */
u8 mac_link_reg; /* mac link: */
u8 mac_tx_en;
u8 mac_rx_en;
u32 pcs_err_cnt;
/* struct hilink_lane: 40 bytes */
u8 lane1[40]; /* 25GE lane in old firmware */
u8 rsvd1[266]; /* hilink machine state */
u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */
u8 rsvd2[2];
};
struct hifc_hilink_link_info {
u8 status;
u8 version;
u8 rsvd0[6];
u16 port_id;
u8 info_type; /* 1: link up; 2: link down; 3: cable plugged */
u8 rsvd1;
struct hifc_link_info info;
u8 rsvd2[352];
};
int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id);
void hifc_set_chip_present(void *hwdev);
void hifc_force_complete_all(void *hwdev);
void hifc_init_heartbeat(struct hifc_hwdev *hwdev);
void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev);
u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data);
int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag);
int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size,
void *buf_out, u16 *out_size, u32 timeout);
void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level,
u8 event, u64 val);
bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd);
int hifc_phy_init_status_judge(void *hwdev);
int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size);
struct hifc_sge {
u32 hi_addr;
u32 lo_addr;
u32 len;
};
void hifc_cpu_to_be32(void *data, int len);
void hifc_be32_to_cpu(void *data, int len);
void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len);
#endif

630
hifc/hifc_hwif.c Normal file
View File

@ -0,0 +1,630 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_eqs.h"
#define WAIT_HWIF_READY_TIMEOUT 10000
#define HIFC_SELFTEST_RESULT 0x883C
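/* The chip exposes its configuration CSRs in big-endian, so values are
 * swapped to and from host byte order around readl()/writel().
 */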
u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg)
{
return be32_to_cpu(readl(hwif->cfg_regs_base + reg));
}
void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val)
{
writel(cpu_to_be32(val), hwif->cfg_regs_base + reg);
}
/**
* hwif_ready - test if the HW initialization passed
* @hwdev: the pointer to hw device
* Return: 0 - success, negative - failure
**/
static int hwif_ready(struct hifc_hwdev *hwdev)
{
u32 addr, attr1;
addr = HIFC_CSR_FUNC_ATTR1_ADDR;
attr1 = hifc_hwif_read_reg(hwdev->hwif, addr);
if (attr1 == HIFC_PCIE_LINK_DOWN)
return -EBUSY;
if (!HIFC_AF1_GET(attr1, MGMT_INIT_STATUS))
return -EBUSY;
return 0;
}
static int wait_hwif_ready(struct hifc_hwdev *hwdev)
{
ulong timeout = 0;
do {
if (!hwif_ready(hwdev))
return 0;
usleep_range(999, 1000);
timeout++;
} while (timeout <= WAIT_HWIF_READY_TIMEOUT);
sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n");
return -EBUSY;
}
/**
* set_hwif_attr - set the attributes as members in hwif
* @hwif: the hardware interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
* @attr2: the third attribute that was read from the hw
**/
static void set_hwif_attr(struct hifc_hwif *hwif, u32 attr0, u32 attr1,
u32 attr2)
{
hwif->attr.func_global_idx = HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
hwif->attr.port_to_port_idx = HIFC_AF0_GET(attr0, P2P_IDX);
hwif->attr.pci_intf_idx = HIFC_AF0_GET(attr0, PCI_INTF_IDX);
hwif->attr.vf_in_pf = HIFC_AF0_GET(attr0, VF_IN_PF);
hwif->attr.func_type = HIFC_AF0_GET(attr0, FUNC_TYPE);
hwif->attr.ppf_idx = HIFC_AF1_GET(attr1, PPF_IDX);
hwif->attr.num_aeqs = BIT(HIFC_AF1_GET(attr1, AEQS_PER_FUNC));
hwif->attr.num_ceqs = BIT(HIFC_AF1_GET(attr1, CEQS_PER_FUNC));
hwif->attr.num_irqs = BIT(HIFC_AF1_GET(attr1, IRQS_PER_FUNC));
hwif->attr.num_dma_attr = BIT(HIFC_AF1_GET(attr1, DMA_ATTR_PER_FUNC));
}
/**
* get_hwif_attr - read and set the attributes as members in hwif
* @hwif: the hardware interface of a pci function device
**/
static void get_hwif_attr(struct hifc_hwif *hwif)
{
u32 addr, attr0, attr1, attr2;
addr = HIFC_CSR_FUNC_ATTR0_ADDR;
attr0 = hifc_hwif_read_reg(hwif, addr);
addr = HIFC_CSR_FUNC_ATTR1_ADDR;
attr1 = hifc_hwif_read_reg(hwif, addr);
addr = HIFC_CSR_FUNC_ATTR2_ADDR;
attr2 = hifc_hwif_read_reg(hwif, addr);
set_hwif_attr(hwif, attr0, attr1, attr2);
}
void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status)
{
u32 attr5 = HIFC_AF5_SET(status, PF_STATUS);
u32 addr = HIFC_CSR_FUNC_ATTR5_ADDR;
hifc_hwif_write_reg(hwif, addr, attr5);
}
enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif)
{
u32 attr5 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR5_ADDR);
return HIFC_AF5_GET(attr5, PF_STATUS);
}
enum hifc_doorbell_ctrl hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif)
{
u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR);
return HIFC_AF4_GET(attr4, DOORBELL_CTRL);
}
enum hifc_outbound_ctrl hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif)
{
u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR);
return HIFC_AF4_GET(attr4, OUTBOUND_CTRL);
}
void hifc_enable_doorbell(struct hifc_hwif *hwif)
{
u32 addr, attr4;
addr = HIFC_CSR_FUNC_ATTR4_ADDR;
attr4 = hifc_hwif_read_reg(hwif, addr);
attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL);
attr4 |= HIFC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL);
hifc_hwif_write_reg(hwif, addr, attr4);
}
void hifc_disable_doorbell(struct hifc_hwif *hwif)
{
u32 addr, attr4;
addr = HIFC_CSR_FUNC_ATTR4_ADDR;
attr4 = hifc_hwif_read_reg(hwif, addr);
attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL);
attr4 |= HIFC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
hifc_hwif_write_reg(hwif, addr, attr4);
}
/**
* set_ppf - try to set hwif as ppf and set the type of hwif in this case
* @hwif: the hardware interface of a pci function device
**/
static void set_ppf(struct hifc_hwif *hwif)
{
struct hifc_func_attr *attr = &hwif->attr;
u32 addr, val, ppf_election;
/* Read Modify Write */
addr = HIFC_CSR_PPF_ELECTION_ADDR;
val = hifc_hwif_read_reg(hwif, addr);
val = HIFC_PPF_ELECTION_CLEAR(val, IDX);
ppf_election = HIFC_PPF_ELECTION_SET(attr->func_global_idx, IDX);
val |= ppf_election;
hifc_hwif_write_reg(hwif, addr, val);
/* Check PPF */
val = hifc_hwif_read_reg(hwif, addr);
attr->ppf_idx = HIFC_PPF_ELECTION_GET(val, IDX);
if (attr->ppf_idx == attr->func_global_idx)
attr->func_type = TYPE_PPF;
}
/**
* get_mpf - get the mpf index into the hwif
* @hwif: the hardware interface of a pci function device
**/
static void get_mpf(struct hifc_hwif *hwif)
{
struct hifc_func_attr *attr = &hwif->attr;
u32 mpf_election, addr;
addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR;
mpf_election = hifc_hwif_read_reg(hwif, addr);
attr->mpf_idx = HIFC_MPF_ELECTION_GET(mpf_election, IDX);
}
/**
* set_mpf - try to set hwif as mpf and set the mpf idx in hwif
* @hwif: the hardware interface of a pci function device
**/
static void set_mpf(struct hifc_hwif *hwif)
{
struct hifc_func_attr *attr = &hwif->attr;
u32 addr, val, mpf_election;
/* Read Modify Write */
addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR;
val = hifc_hwif_read_reg(hwif, addr);
val = HIFC_MPF_ELECTION_CLEAR(val, IDX);
mpf_election = HIFC_MPF_ELECTION_SET(attr->func_global_idx, IDX);
val |= mpf_election;
hifc_hwif_write_reg(hwif, addr, val);
}
static void init_db_area_idx(struct hifc_free_db_area *free_db_area)
{
u32 i;
for (i = 0; i < HIFC_DB_MAX_AREAS; i++)
free_db_area->db_idx[i] = i;
free_db_area->num_free = HIFC_DB_MAX_AREAS;
spin_lock_init(&free_db_area->idx_lock);
}
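/**
 * get_db_idx - allocate a free doorbell page index
 * @hwif: the hardware interface of a pci function device
 * @idx: returned doorbell page index
 * db_idx[] is used as a circular buffer: alloc_pos and return_pos are
 * free-running cursors masked with HIFC_DB_MAX_AREAS - 1 (the area count
 * is a power of two), and consumed slots are poisoned with 0xFFFFFFFF so
 * a not-yet-returned slot is skipped on retry.
 * Return: 0 - success, -ENOMEM - no free doorbell page
 **/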
static int get_db_idx(struct hifc_hwif *hwif, u32 *idx)
{
struct hifc_free_db_area *free_db_area = &hwif->free_db_area;
u32 pos;
u32 pg_idx;
spin_lock(&free_db_area->idx_lock);
retry:
if (free_db_area->num_free == 0) {
spin_unlock(&free_db_area->idx_lock);
return -ENOMEM;
}
free_db_area->num_free--;
pos = free_db_area->alloc_pos++;
pos &= HIFC_DB_MAX_AREAS - 1;
pg_idx = free_db_area->db_idx[pos];
free_db_area->db_idx[pos] = 0xFFFFFFFF;
/* pg_idx out of range */
if (pg_idx >= HIFC_DB_MAX_AREAS)
goto retry;
spin_unlock(&free_db_area->idx_lock);
*idx = pg_idx;
return 0;
}
static void free_db_idx(struct hifc_hwif *hwif, u32 idx)
{
struct hifc_free_db_area *free_db_area = &hwif->free_db_area;
u32 pos;
if (idx >= HIFC_DB_MAX_AREAS)
return;
spin_lock(&free_db_area->idx_lock);
pos = free_db_area->return_pos++;
pos &= HIFC_DB_MAX_AREAS - 1;
free_db_area->db_idx[pos] = idx;
free_db_area->num_free++;
spin_unlock(&free_db_area->idx_lock);
}
void hifc_free_db_addr(void *hwdev, void __iomem *db_base,
void __iomem *dwqe_base)
{
struct hifc_hwif *hwif;
u32 idx;
if (!hwdev || !db_base)
return;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
idx = DB_IDX(db_base, hwif->db_base);
#if defined(__aarch64__)
/* No need to unmap */
#else
if (dwqe_base)
io_mapping_unmap(dwqe_base);
#endif
free_db_idx(hwif, idx);
}
int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base,
void __iomem **dwqe_base)
{
struct hifc_hwif *hwif;
u64 offset;
u32 idx;
int err;
if (!hwdev || !db_base)
return -EINVAL;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
err = get_db_idx(hwif, &idx);
if (err)
return -EFAULT;
*db_base = hwif->db_base + idx * HIFC_DB_PAGE_SIZE;
if (!dwqe_base)
return 0;
offset = ((u64)idx) << PAGE_SHIFT;
#if defined(__aarch64__)
*dwqe_base = hwif->dwqe_mapping + offset;
#else
*dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset,
HIFC_DB_PAGE_SIZE);
#endif
if (!(*dwqe_base)) {
hifc_free_db_addr(hwdev, *db_base, NULL);
return -EFAULT;
}
return 0;
}
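/* Usage sketch (illustrative only, not part of the driver): allocate one
 * doorbell page together with its direct-wqe mapping, use it, then free it.
 *
 * void __iomem *db, *dwqe;
 *
 * if (!hifc_alloc_db_addr(hwdev, &db, &dwqe)) {
 *	... ring doorbells through db, write direct wqes through dwqe ...
 *	hifc_free_db_addr(hwdev, db, dwqe);
 * }
 */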
void hifc_set_msix_state(void *hwdev, u16 msix_idx, enum hifc_msix_state flag)
{
struct hifc_hwif *hwif;
u32 offset = msix_idx * HIFC_PCI_MSIX_ENTRY_SIZE +
HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL;
u32 mask_bits;
if (!hwdev)
return;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
mask_bits = readl(hwif->intr_regs_base + offset);
mask_bits &= ~HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
if (flag)
mask_bits |= HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, hwif->intr_regs_base + offset);
}
static void disable_all_msix(struct hifc_hwdev *hwdev)
{
u16 num_irqs = hwdev->hwif->attr.num_irqs;
u16 i;
for (i = 0; i < num_irqs; i++)
hifc_set_msix_state(hwdev, i, HIFC_MSIX_DISABLE);
}
static int wait_until_doorbell_and_outbound_enabled(struct hifc_hwif *hwif)
{
enum hifc_doorbell_ctrl db_ctrl;
enum hifc_outbound_ctrl outbound_ctrl;
u32 cnt = 0;
while (cnt < HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
db_ctrl = hifc_get_doorbell_ctrl_status(hwif);
outbound_ctrl = hifc_get_outbound_ctrl_status(hwif);
if (outbound_ctrl == ENABLE_OUTBOUND &&
db_ctrl == ENABLE_DOORBELL)
return 0;
usleep_range(900, 1000);
cnt++;
}
return -EFAULT;
}
static void __print_selftest_reg(struct hifc_hwdev *hwdev)
{
u32 addr, attr0, attr1;
addr = HIFC_CSR_FUNC_ATTR1_ADDR;
attr1 = hifc_hwif_read_reg(hwdev->hwif, addr);
if (attr1 == HIFC_PCIE_LINK_DOWN) {
sdk_err(hwdev->dev_hdl, "PCIE is link down\n");
return;
}
addr = HIFC_CSR_FUNC_ATTR0_ADDR;
attr0 = hifc_hwif_read_reg(hwdev->hwif, addr);
if (HIFC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF &&
!HIFC_AF0_GET(attr0, PCI_INTF_IDX))
sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n",
hifc_hwif_read_reg(hwdev->hwif,
HIFC_SELFTEST_RESULT));
}
/**
* hifc_init_hwif - initialize the hw interface
* @hwdev: the pointer to hw device
* @cfg_reg_base: configuration space base address
* @intr_reg_base: interrupt configuration register base address
* @db_base_phy: doorbell base physical address
* @db_base: doorbell base virtual address
* @dwqe_mapping: direct wqe mapping
* Return: 0 - success, negative - failure
**/
int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base,
void *intr_reg_base, u64 db_base_phy,
void *db_base, void *dwqe_mapping)
{
struct hifc_hwif *hwif;
int err;
hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
if (!hwif)
return -ENOMEM;
hwdev->hwif = hwif;
hwif->pdev = hwdev->pcidev_hdl;
hwif->cfg_regs_base = cfg_reg_base;
hwif->intr_regs_base = intr_reg_base;
hwif->db_base_phy = db_base_phy;
hwif->db_base = db_base;
hwif->dwqe_mapping = dwqe_mapping;
init_db_area_idx(&hwif->free_db_area);
err = wait_hwif_ready(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Chip status is not ready\n");
__print_selftest_reg(hwdev);
goto hwif_ready_err;
}
get_hwif_attr(hwif);
err = wait_until_doorbell_and_outbound_enabled(hwif);
if (err) {
sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n");
goto hwif_ready_err;
}
set_ppf(hwif);
if (HIFC_IS_PPF(hwdev))
set_mpf(hwif);
get_mpf(hwif);
disable_all_msix(hwdev);
/* prevent the mgmt cpu from reporting any event */
hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT);
pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n",
hwif->attr.func_global_idx, hwif->attr.func_type,
hwif->attr.pci_intf_idx, hwif->attr.ppf_idx,
hwif->attr.mpf_idx);
return 0;
hwif_ready_err:
kfree(hwif);
return err;
}
/**
* hifc_free_hwif - free the hw interface
* @hwdev: the pointer to hw device
**/
void hifc_free_hwif(struct hifc_hwdev *hwdev)
{
kfree(hwdev->hwif);
}
int hifc_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align,
unsigned int flag,
struct hifc_dma_addr_align *mem_align)
{
void *vaddr, *align_vaddr;
dma_addr_t paddr, align_paddr;
u64 real_size = size;
vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag);
if (!vaddr)
return -ENOMEM;
align_paddr = ALIGN(paddr, align);
/* align */
if (align_paddr == paddr) {
align_vaddr = vaddr;
goto out;
}
dma_free_coherent(dev_hdl, real_size, vaddr, paddr);
/* realloc memory for align */
real_size = size + align;
vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag);
if (!vaddr)
return -ENOMEM;
align_paddr = ALIGN(paddr, align);
align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr));
out:
mem_align->real_size = (u32)real_size;
mem_align->ori_vaddr = vaddr;
mem_align->ori_paddr = paddr;
mem_align->align_vaddr = align_vaddr;
mem_align->align_paddr = align_paddr;
return 0;
}
void hifc_dma_free_coherent_align(void *dev_hdl,
struct hifc_dma_addr_align *mem_align)
{
dma_free_coherent(dev_hdl, mem_align->real_size,
mem_align->ori_vaddr, mem_align->ori_paddr);
}
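/* Usage sketch (illustrative only, not part of the driver): allocate a
 * 4KB-aligned coherent buffer. The helper retries with size + align only
 * when the first allocation comes back unaligned.
 *
 * struct hifc_dma_addr_align mem;
 *
 * if (!hifc_dma_alloc_coherent_align(dev_hdl, 0x1000, 0x1000, GFP_KERNEL,
 *				      &mem)) {
 *	... DMA through mem.align_vaddr / mem.align_paddr ...
 *	hifc_dma_free_coherent_align(dev_hdl, &mem);
 * }
 */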
u16 hifc_global_func_id(void *hwdev)
{
struct hifc_hwif *hwif;
if (!hwdev)
return 0;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
return hwif->attr.func_global_idx;
}
/**
* hifc_global_func_id_hw - get the function id from the register; used by
* the sriov hot migration process
* @hwdev: the pointer to hw device
**/
u16 hifc_global_func_id_hw(void *hwdev)
{
u32 addr, attr0;
struct hifc_hwdev *dev;
dev = (struct hifc_hwdev *)hwdev;
addr = HIFC_CSR_FUNC_ATTR0_ADDR;
attr0 = hifc_hwif_read_reg(dev->hwif, addr);
return HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
}
/**
* hifc_global_func_id_get - get the function id; used by the sriov hot
* migration process
* @hwdev: the pointer to hw device
* @func_id: returned function id
**/
int hifc_global_func_id_get(void *hwdev, u16 *func_id)
{
*func_id = hifc_global_func_id(hwdev);
return 0;
}
u8 hifc_pcie_itf_id(void *hwdev)
{
struct hifc_hwif *hwif;
if (!hwdev)
return 0;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
return hwif->attr.pci_intf_idx;
}
EXPORT_SYMBOL(hifc_pcie_itf_id);
enum func_type hifc_func_type(void *hwdev)
{
struct hifc_hwif *hwif;
if (!hwdev)
return 0;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
return hwif->attr.func_type;
}
u8 hifc_ppf_idx(void *hwdev)
{
struct hifc_hwif *hwif;
if (!hwdev)
return 0;
hwif = ((struct hifc_hwdev *)hwdev)->hwif;
return hwif->attr.ppf_idx;
}

243
hifc/hifc_hwif.h Normal file
View File

@ -0,0 +1,243 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_HWIF_H
#define HIFC_HWIF_H
#include "hifc_hwdev.h"
#define HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000
#define HIFC_CSR_GLOBAL_BASE_ADDR 0x4000
/* HW interface registers */
#define HIFC_CSR_FUNC_ATTR0_ADDR 0x0
#define HIFC_CSR_FUNC_ATTR1_ADDR 0x4
#define HIFC_CSR_FUNC_ATTR2_ADDR 0x8
#define HIFC_CSR_FUNC_ATTR4_ADDR 0x10
#define HIFC_CSR_FUNC_ATTR5_ADDR 0x14
#define HIFC_PCI_MSIX_ENTRY_SIZE 16
#define HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
#define HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
/* total doorbell or direct wqe size is 512KB; db num: 128, dwqe: 128 */
#define HIFC_DB_DWQE_SIZE 0x00080000
/* db/dwqe page size: 4K */
#define HIFC_DB_PAGE_SIZE 0x00001000ULL
#define HIFC_DB_MAX_AREAS (HIFC_DB_DWQE_SIZE / HIFC_DB_PAGE_SIZE)
#define HIFC_ELECTION_BASE 0x200
#define HIFC_PPF_ELECTION_STRIDE 0x4
#define HIFC_CSR_MAX_PORTS 4
#define HIFC_CSR_PPF_ELECTION_ADDR \
(HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE)
#define HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR \
(HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE + \
HIFC_CSR_MAX_PORTS * HIFC_PPF_ELECTION_STRIDE)
#define DB_IDX(db, db_base) \
((u32)(((ulong)(db) - (ulong)(db_base)) / \
HIFC_DB_PAGE_SIZE))
#define HIFC_AF0_FUNC_GLOBAL_IDX_SHIFT 0
#define HIFC_AF0_P2P_IDX_SHIFT 10
#define HIFC_AF0_PCI_INTF_IDX_SHIFT 14
#define HIFC_AF0_VF_IN_PF_SHIFT 16
#define HIFC_AF0_FUNC_TYPE_SHIFT 24
#define HIFC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF
#define HIFC_AF0_P2P_IDX_MASK 0xF
#define HIFC_AF0_PCI_INTF_IDX_MASK 0x3
#define HIFC_AF0_VF_IN_PF_MASK 0xFF
#define HIFC_AF0_FUNC_TYPE_MASK 0x1
#define HIFC_AF0_GET(val, member) \
(((val) >> HIFC_AF0_##member##_SHIFT) & HIFC_AF0_##member##_MASK)
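/* Example: for attr0 == 0x01000005, HIFC_AF0_GET(attr0, FUNC_TYPE) is
 * (0x01000005 >> 24) & 0x1 == 1 (TYPE_VF), and
 * HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX) is 0x01000005 & 0x3FF == 5.
 */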
#define HIFC_AF1_PPF_IDX_SHIFT 0
#define HIFC_AF1_AEQS_PER_FUNC_SHIFT 8
#define HIFC_AF1_CEQS_PER_FUNC_SHIFT 12
#define HIFC_AF1_IRQS_PER_FUNC_SHIFT 20
#define HIFC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24
#define HIFC_AF1_MGMT_INIT_STATUS_SHIFT 30
#define HIFC_AF1_PF_INIT_STATUS_SHIFT 31
#define HIFC_AF1_PPF_IDX_MASK 0x1F
#define HIFC_AF1_AEQS_PER_FUNC_MASK 0x3
#define HIFC_AF1_CEQS_PER_FUNC_MASK 0x7
#define HIFC_AF1_IRQS_PER_FUNC_MASK 0xF
#define HIFC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7
#define HIFC_AF1_MGMT_INIT_STATUS_MASK 0x1
#define HIFC_AF1_PF_INIT_STATUS_MASK 0x1
#define HIFC_AF1_GET(val, member) \
(((val) >> HIFC_AF1_##member##_SHIFT) & HIFC_AF1_##member##_MASK)
#define HIFC_AF4_OUTBOUND_CTRL_SHIFT 0
#define HIFC_AF4_DOORBELL_CTRL_SHIFT 1
#define HIFC_AF4_OUTBOUND_CTRL_MASK 0x1
#define HIFC_AF4_DOORBELL_CTRL_MASK 0x1
#define HIFC_AF4_GET(val, member) \
(((val) >> HIFC_AF4_##member##_SHIFT) & HIFC_AF4_##member##_MASK)
#define HIFC_AF4_SET(val, member) \
(((val) & HIFC_AF4_##member##_MASK) << HIFC_AF4_##member##_SHIFT)
#define HIFC_AF4_CLEAR(val, member) \
((val) & (~(HIFC_AF4_##member##_MASK << \
HIFC_AF4_##member##_SHIFT)))
#define HIFC_AF5_PF_STATUS_SHIFT 0
#define HIFC_AF5_PF_STATUS_MASK 0xFFFF
#define HIFC_AF5_SET(val, member) \
(((val) & HIFC_AF5_##member##_MASK) << HIFC_AF5_##member##_SHIFT)
#define HIFC_AF5_GET(val, member) \
(((val) >> HIFC_AF5_##member##_SHIFT) & HIFC_AF5_##member##_MASK)
#define HIFC_PPF_ELECTION_IDX_SHIFT 0
#define HIFC_PPF_ELECTION_IDX_MASK 0x1F
#define HIFC_PPF_ELECTION_SET(val, member) \
(((val) & HIFC_PPF_ELECTION_##member##_MASK) << \
HIFC_PPF_ELECTION_##member##_SHIFT)
#define HIFC_PPF_ELECTION_GET(val, member) \
(((val) >> HIFC_PPF_ELECTION_##member##_SHIFT) & \
HIFC_PPF_ELECTION_##member##_MASK)
#define HIFC_PPF_ELECTION_CLEAR(val, member) \
((val) & (~(HIFC_PPF_ELECTION_##member##_MASK \
<< HIFC_PPF_ELECTION_##member##_SHIFT)))
#define HIFC_MPF_ELECTION_IDX_SHIFT 0
#define HIFC_MPF_ELECTION_IDX_MASK 0x1F
#define HIFC_MPF_ELECTION_SET(val, member) \
(((val) & HIFC_MPF_ELECTION_##member##_MASK) << \
HIFC_MPF_ELECTION_##member##_SHIFT)
#define HIFC_MPF_ELECTION_GET(val, member) \
(((val) >> HIFC_MPF_ELECTION_##member##_SHIFT) & \
HIFC_MPF_ELECTION_##member##_MASK)
#define HIFC_MPF_ELECTION_CLEAR(val, member) \
((val) & (~(HIFC_MPF_ELECTION_##member##_MASK \
<< HIFC_MPF_ELECTION_##member##_SHIFT)))
#define HIFC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
#define HIFC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
#define HIFC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HIFC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx)
#define HIFC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type)
#define HIFC_IS_PPF(dev) (HIFC_FUNC_TYPE(dev) == TYPE_PPF)
enum hifc_pcie_nosnoop {
HIFC_PCIE_SNOOP = 0,
HIFC_PCIE_NO_SNOOP = 1,
};
enum hifc_pcie_tph {
HIFC_PCIE_TPH_DISABLE = 0,
HIFC_PCIE_TPH_ENABLE = 1,
};
enum hifc_pf_status {
HIFC_PF_STATUS_INIT = 0X0,
HIFC_PF_STATUS_ACTIVE_FLAG = 0x11,
HIFC_PF_STATUS_FLR_START_FLAG = 0x12,
HIFC_PF_STATUS_FLR_FINISH_FLAG = 0x13,
};
enum hifc_outbound_ctrl {
ENABLE_OUTBOUND = 0x0,
DISABLE_OUTBOUND = 0x1,
};
enum hifc_doorbell_ctrl {
ENABLE_DOORBELL = 0x0,
DISABLE_DOORBELL = 0x1,
};
struct hifc_free_db_area {
u32 db_idx[HIFC_DB_MAX_AREAS];
u32 num_free;
u32 alloc_pos;
u32 return_pos;
/* spinlock for allocating doorbell area */
spinlock_t idx_lock;
};
enum func_type {
TYPE_PF,
TYPE_VF,
TYPE_PPF,
TYPE_UNKNOWN,
};
struct hifc_func_attr {
u16 func_global_idx;
u8 port_to_port_idx;
u8 pci_intf_idx;
u8 vf_in_pf;
enum func_type func_type;
u8 mpf_idx;
u8 ppf_idx;
u16 num_irqs; /* max: 2 ^ 15 */
u8 num_aeqs; /* max: 2 ^ 3 */
u8 num_ceqs; /* max: 2 ^ 7 */
u8 num_dma_attr; /* max: 2 ^ 6 */
};
struct hifc_hwif {
u8 __iomem *cfg_regs_base;
u8 __iomem *intr_regs_base;
u64 db_base_phy;
u8 __iomem *db_base;
#if defined(__aarch64__)
void __iomem *dwqe_mapping;
#else
struct io_mapping *dwqe_mapping;
#endif
struct hifc_free_db_area free_db_area;
struct hifc_func_attr attr;
void *pdev;
};
struct hifc_dma_addr_align {
u32 real_size;
void *ori_vaddr;
dma_addr_t ori_paddr;
void *align_vaddr;
dma_addr_t align_paddr;
};
u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg);
void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val);
void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status);
enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif);
enum hifc_doorbell_ctrl
hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif);
enum hifc_outbound_ctrl
hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif);
void hifc_enable_doorbell(struct hifc_hwif *hwif);
void hifc_disable_doorbell(struct hifc_hwif *hwif);
int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base,
void *intr_reg_base, u64 db_base_phy,
void *db_base, void *dwqe_mapping);
void hifc_free_hwif(struct hifc_hwdev *hwdev);
int hifc_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align,
unsigned int flag,
struct hifc_dma_addr_align *mem_align);
void hifc_dma_free_coherent_align(void *dev_hdl,
struct hifc_dma_addr_align *mem_align);
#endif

1243
hifc/hifc_io.c Normal file

File diff suppressed because it is too large

66
hifc/hifc_io.h Normal file
View File

@ -0,0 +1,66 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_IO_H__
#define __HIFC_IO_H__
enum dif_mode_e {
DIF_MODE_NONE = 0x0,
DIF_MODE_INSERT = 0x1,
DIF_MODE_REMOVE = 0x2,
DIF_MODE_FORWARD_OR_REPLACE = 0x3
};
enum ref_tag_mode_e {
BOTH_NONE = 0x0,
RECEIVE_INCREASE = 0x1,
REPLACE_INCREASE = 0x2,
BOTH_INCREASE = 0x3
};
#define HIFC_DIF_DISABLE 0
#define HIFC_DIF_ENABLE 1
#define HIFC_DIF_SECTOR_512B_MODE 0
#define HIFC_DIF_SECTOR_4KB_MODE 1
#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0
#define HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1
#define HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2
#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3
#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0
#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4
#define HIFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4
#define HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0
#define HIFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0
#define HIFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1
#define HIFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2
#define HIFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3
#define HIFC_DIF_ERROR_CODE_MASK 0xe
#define HIFC_DIF_ERROR_CODE_CRC 0x2
#define HIFC_DIF_ERROR_CODE_REF 0x4
#define HIFC_DIF_ERROR_CODE_APP 0x8
#define HIFC_DIF_SEND_DIFERR_PAYLOAD 0
#define HIFC_DIF_SEND_DIFERR_CRC 1
#define HIFC_DIF_SEND_DIFERR_APP 2
#define HIFC_DIF_SEND_DIFERR_REF 3
#define HIFC_DIF_RECV_DIFERR_ALL 4
#define HIFC_DIF_RECV_DIFERR_CRC 5
#define HIFC_DIF_RECV_DIFERR_APP 6
#define HIFC_DIF_RECV_DIFERR_REF 7
#define HIFC_SECT_SIZE_512 512
#define HIFC_SECT_SIZE_4096 4096
#define HIFC_SECT_SIZE_512_8 520
#define HIFC_SECT_SIZE_4096_8 4104
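/* The +8 sector sizes (520, 4104) account for the 8-byte T10 DIF tuple
 * appended to each sector: a 2-byte guard tag (CRC16 or IP checksum,
 * selected by the HIFC_DIF_GUARD_VERIFY_* controls above), a 2-byte
 * application tag and a 4-byte reference tag.
 */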
#define HIFC_CTRL_MASK 0x1f
unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_wqe);
#endif /* __HIFC_IO_H__ */

48
hifc/hifc_knl_adp.h Normal file

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_KNL_ADP_H_
#define HIFC_KNL_ADP_H_
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <linux/sched/signal.h>
#define __TIME_STR__ "[compiled with the kernel]"
#define sdk_err(dev, format, ...) \
dev_err(dev, "[COMM]" format, ##__VA_ARGS__)
#define sdk_warn(dev, format, ...) \
dev_warn(dev, "[COMM]" format, ##__VA_ARGS__)
#define sdk_notice(dev, format, ...) \
dev_notice(dev, "[COMM]" format, ##__VA_ARGS__)
#define sdk_info(dev, format, ...) \
dev_info(dev, "[COMM]" format, ##__VA_ARGS__)
#endif

891
hifc/hifc_lld.c Normal file

@ -0,0 +1,891 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_dbgtool_knl.h"
#include "hifc_tool.h"
#define HIFC_PCI_CFG_REG_BAR 0
#define HIFC_PCI_INTR_REG_BAR 2
#define HIFC_PCI_DB_BAR 4
#define HIFC_SECOND_BASE 1000
#define HIFC_SYNC_YEAR_OFFSET 1900
#define HIFC_SYNC_MONTH_OFFSET 1
#define HIFC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver"
#define HIFCVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver"
MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION(HIFC_DRV_DESC);
MODULE_VERSION(HIFC_DRV_VERSION);
MODULE_LICENSE("GPL");
#define HIFC_EVENT_PROCESS_TIMEOUT 10000
#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 1 : 0)
#define SET_BIT(num, n) ((num) | (1UL << (n)))
#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n))))
#define MAX_CARD_ID 64
static u64 card_bit_map;
LIST_HEAD(g_hifc_chip_list);
enum hifc_lld_status {
HIFC_NODE_CHANGE = BIT(0),
};
struct hifc_lld_lock {
/* lock for chip list */
struct mutex lld_mutex;
unsigned long status;
atomic_t dev_ref_cnt;
};
static struct hifc_lld_lock g_lld_lock;
#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10 minutes */
#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10 minutes */
#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2 minutes */
/* Nodes in the chip_node list are about to change; tools and drivers
 * must not look up a node while this is in progress
 */
static void lld_lock_chip_node(void)
{
u32 loop_cnt;
mutex_lock(&g_lld_lock.lld_mutex);
loop_cnt = 0;
while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
if (!test_and_set_bit(HIFC_NODE_CHANGE, &g_lld_lock.status))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait for lld node change complete for %us\n",
loop_cnt / 1000);
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
pr_warn("Wait for lld node change complete timeout when try to get lld lock\n");
loop_cnt = 0;
while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
if (!atomic_read(&g_lld_lock.dev_ref_cnt))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
loop_cnt / 1000,
atomic_read(&g_lld_lock.dev_ref_cnt));
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
pr_warn("Wait for lld dev unused timeout\n");
mutex_unlock(&g_lld_lock.lld_mutex);
}
static void lld_unlock_chip_node(void)
{
clear_bit(HIFC_NODE_CHANGE, &g_lld_lock.status);
}
/* When tools or other drivers want to get a node from chip_node, use this
 * function to prevent the node from being freed
 */
void lld_dev_hold(void)
{
u32 loop_cnt = 0;
/* ensure no chip node is currently being changed */
mutex_lock(&g_lld_lock.lld_mutex);
while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
if (!test_bit(HIFC_NODE_CHANGE, &g_lld_lock.status))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait lld node change complete for %us\n",
loop_cnt / 1000);
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
pr_warn("Wait lld node change complete timeout when try to hode lld dev\n");
atomic_inc(&g_lld_lock.dev_ref_cnt);
mutex_unlock(&g_lld_lock.lld_mutex);
}
void lld_dev_put(void)
{
atomic_dec(&g_lld_lock.dev_ref_cnt);
}
static void hifc_lld_lock_init(void)
{
mutex_init(&g_lld_lock.lld_mutex);
atomic_set(&g_lld_lock.dev_ref_cnt, 0);
}
extern int hifc_probe(struct hifc_lld_dev *lld_dev,
void **uld_dev, char *uld_dev_name);
static int attach_uld(struct hifc_pcidev *dev)
{
void *uld_dev = NULL;
int err;
mutex_lock(&dev->pdev_mutex);
if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) {
sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n");
err = -EFAULT;
goto out_unlock;
}
err = hifc_stateful_init(dev->hwdev);
if (err)
goto out_unlock;
err = hifc_probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name);
if (err || !uld_dev) {
sdk_err(&dev->pcidev->dev,
"Failed to add object for driver to pcie device\n");
goto probe_failed;
}
dev->uld_dev = uld_dev;
mutex_unlock(&dev->pdev_mutex);
sdk_info(&dev->pcidev->dev,
"Attach driver to pcie device succeed\n");
return 0;
probe_failed:
hifc_stateful_deinit(dev->hwdev);
out_unlock:
mutex_unlock(&dev->pdev_mutex);
return err;
}
extern void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev);
static void detach_uld(struct hifc_pcidev *dev)
{
u32 cnt = 0;
mutex_lock(&dev->pdev_mutex);
while (cnt < HIFC_EVENT_PROCESS_TIMEOUT) {
if (!test_and_set_bit(SERVICE_T_FC, &dev->state))
break;
usleep_range(900, 1000);
cnt++;
}
hifc_remove(&dev->lld_dev, dev->uld_dev);
dev->uld_dev = NULL;
hifc_stateful_deinit(dev->hwdev);
if (cnt < HIFC_EVENT_PROCESS_TIMEOUT)
clear_bit(SERVICE_T_FC, &dev->state);
sdk_info(&dev->pcidev->dev,
"Detach driver from pcie device succeed\n");
mutex_unlock(&dev->pdev_mutex);
}
static void hifc_sync_time_to_fmw(struct hifc_pcidev *pdev_pri)
{
struct tm tm = {0};
u64 tv_msec;
int err;
tv_msec = ktime_to_ms(ktime_get_real());
err = hifc_sync_time(pdev_pri->hwdev, tv_msec);
if (err) {
sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n",
err);
} else {
time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm);
sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n",
tm.tm_year + HIFC_SYNC_YEAR_OFFSET,
tm.tm_mon + HIFC_SYNC_MONTH_OFFSET,
tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
}
}
#define MAX_VER_FIELD_LEN 4
#define MAX_VER_SPLIT_NUM 4
struct mctp_hdr {
u16 resp_code;
u16 reason_code;
u32 manufacture_id;
u8 cmd_rsvd;
u8 major_cmd;
u8 sub_cmd;
u8 spc_field;
};
struct mctp_bdf_info {
struct mctp_hdr hdr; /* spc_field: pf index */
u8 rsvd;
u8 bus;
u8 device;
u8 function;
};
static void __mctp_set_hdr(struct mctp_hdr *hdr,
struct hifc_mctp_host_info *mctp_info)
{
u32 manufacture_id = 0x07DB;
hdr->cmd_rsvd = 0;
hdr->major_cmd = mctp_info->major_cmd;
hdr->sub_cmd = mctp_info->sub_cmd;
hdr->manufacture_id = cpu_to_be32(manufacture_id);
hdr->resp_code = cpu_to_be16(hdr->resp_code);
hdr->reason_code = cpu_to_be16(hdr->reason_code);
}
static void __mctp_get_bdf(struct hifc_pcidev *pci_adapter,
struct hifc_mctp_host_info *mctp_info)
{
struct pci_dev *pdev = pci_adapter->pcidev;
struct mctp_bdf_info *bdf_info = mctp_info->data;
bdf_info->bus = pdev->bus->number;
bdf_info->device = (u8)(pdev->devfn >> 3); /* device: high 5 bits of devfn */
bdf_info->function = (u8)(pdev->devfn & 0x7); /* function: low 3 bits of devfn */
memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr));
__mctp_set_hdr(&bdf_info->hdr, mctp_info);
bdf_info->hdr.spc_field =
(u8)hifc_global_func_id_hw(pci_adapter->hwdev);
mctp_info->data_len = sizeof(*bdf_info);
}
#define MCTP_PUBLIC_SUB_CMD_BDF 0x1
static void __mctp_get_host_info(struct hifc_pcidev *dev,
struct hifc_mctp_host_info *mctp_info)
{
#define COMMAND_UNSUPPORTED 3
struct mctp_hdr *hdr;
if (((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) ==
MCTP_PUBLIC_SUB_CMD_BDF) {
__mctp_get_bdf(dev, mctp_info);
} else {
hdr = mctp_info->data;
hdr->reason_code = COMMAND_UNSUPPORTED;
__mctp_set_hdr(hdr, mctp_info);
mctp_info->data_len = sizeof(*hdr);
}
}
void *hifc_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
{
struct hifc_pcidev *pci_adapter;
struct card_node *chip_node;
struct hifc_pcidev *dev;
if (!pdev)
return NULL;
pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter)
return NULL;
chip_node = pci_adapter->chip_node;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
if (dev->hwdev && hifc_func_type(dev->hwdev) == TYPE_PPF) {
lld_dev_put();
return dev->hwdev;
}
}
lld_dev_put();
return NULL;
}
void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev,
struct hifc_event_info *event);
void hifc_event_process(void *adapter, struct hifc_event_info *event)
{
struct hifc_pcidev *dev = adapter;
if (event->type == HIFC_EVENT_FMW_ACT_NTC)
return hifc_sync_time_to_fmw(dev);
else if (event->type == HIFC_EVENT_MCTP_GET_HOST_INFO)
return __mctp_get_host_info(dev, &event->mctp_info);
if (test_and_set_bit(SERVICE_T_FC, &dev->state)) {
sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler is in detach\n",
event->type);
return;
}
hifc_event(&dev->lld_dev, dev->uld_dev, event);
clear_bit(SERVICE_T_FC, &dev->state);
}
static int mapping_bar(struct pci_dev *pdev, struct hifc_pcidev *pci_adapter)
{
u64 dwqe_addr;
pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, HIFC_PCI_CFG_REG_BAR);
if (!pci_adapter->cfg_reg_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map configuration regs\n");
return -ENOMEM;
}
pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
HIFC_PCI_INTR_REG_BAR);
if (!pci_adapter->intr_reg_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map interrupt regs\n");
goto map_intr_bar_err;
}
pci_adapter->db_base_phy = pci_resource_start(pdev, HIFC_PCI_DB_BAR);
pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
HIFC_DB_DWQE_SIZE);
if (!pci_adapter->db_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map doorbell regs\n");
goto map_db_err;
}
dwqe_addr = pci_adapter->db_base_phy + HIFC_DB_DWQE_SIZE;
#if defined(__aarch64__)
/* arm64 does not support ioremap_wc() for this mapping */
pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HIFC_DB_DWQE_SIZE,
__pgprot(PROT_DEVICE_nGnRnE));
#else
pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr,
HIFC_DB_DWQE_SIZE);
#endif /* end of "defined(__aarch64__)" */
if (!pci_adapter->dwqe_mapping) {
sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
goto mapping_dwqe_err;
}
return 0;
mapping_dwqe_err:
iounmap(pci_adapter->db_base);
map_db_err:
iounmap(pci_adapter->intr_reg_base);
map_intr_bar_err:
iounmap(pci_adapter->cfg_reg_base);
return -ENOMEM;
}
static void unmapping_bar(struct hifc_pcidev *pci_adapter)
{
#if defined(__aarch64__)
iounmap(pci_adapter->dwqe_mapping);
#else
io_mapping_free(pci_adapter->dwqe_mapping);
#endif /* end of "defined(__aarch64__)" */
iounmap(pci_adapter->db_base);
iounmap(pci_adapter->intr_reg_base);
iounmap(pci_adapter->cfg_reg_base);
}
static int alloc_chip_node(struct hifc_pcidev *pci_adapter)
{
struct card_node *chip_node;
unsigned char i;
unsigned char parent_bus_number = 0;
if (!pci_is_root_bus(pci_adapter->pcidev->bus))
parent_bus_number = pci_adapter->pcidev->bus->parent->number;
if (parent_bus_number != 0) {
list_for_each_entry(chip_node, &g_hifc_chip_list, node) {
if (chip_node->dp_bus_num == parent_bus_number) {
pci_adapter->chip_node = chip_node;
return 0;
}
}
}
for (i = 0; i < MAX_CARD_ID; i++) {
if (!FIND_BIT(card_bit_map, i)) {
card_bit_map = (u64)SET_BIT(card_bit_map, i);
break;
}
}
if (i == MAX_CARD_ID) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc card id\n");
return -EFAULT;
}
chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
if (!chip_node) {
card_bit_map = CLEAR_BIT(card_bit_map, i);
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc chip node\n");
return -ENOMEM;
}
chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL);
if (!(chip_node->dbgtool_attr_file.name)) {
kfree(chip_node);
card_bit_map = CLEAR_BIT(card_bit_map, i);
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc dbgtool attr file name\n");
return -ENOMEM;
}
/* parent bus number */
chip_node->dp_bus_num = parent_bus_number;
snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i);
snprintf((char *)chip_node->dbgtool_attr_file.name,
IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i);
sdk_info(&pci_adapter->pcidev->dev,
"Add new chip %s to global list succeed\n",
chip_node->chip_name);
list_add_tail(&chip_node->node, &g_hifc_chip_list);
INIT_LIST_HEAD(&chip_node->func_list);
pci_adapter->chip_node = chip_node;
mutex_init(&chip_node->sfp_mutex);
return 0;
}
static void free_chip_node(struct hifc_pcidev *pci_adapter)
{
struct card_node *chip_node = pci_adapter->chip_node;
u32 id;
int err;
if (list_empty(&chip_node->func_list)) {
list_del(&chip_node->node);
sdk_info(&pci_adapter->pcidev->dev,
"Delete chip %s from global list succeed\n",
chip_node->chip_name);
/* sscanf returns the number of matched items; if the id was not
 * parsed, do not clear a bogus bit in the card bitmap
 */
err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%u", &id);
if (err == 1)
card_bit_map = CLEAR_BIT(card_bit_map, id);
else
sdk_err(&pci_adapter->pcidev->dev, "Failed to get hifc id\n");
kfree(chip_node->dbgtool_attr_file.name);
kfree(chip_node);
}
}
static int config_pci_dma_mask(struct pci_dev *pdev)
{
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
sdk_err(&pdev->dev, "Failed to set DMA mask\n");
return err;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
sdk_warn(&pdev->dev,
"Couldn't set 64-bit coherent DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
sdk_err(&pdev->dev,
"Failed to set coherent DMA mask\n");
return err;
}
}
return 0;
}
static int hifc_pci_init(struct pci_dev *pdev)
{
struct hifc_pcidev *pci_adapter = NULL;
int err;
pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
if (!pci_adapter) {
sdk_err(&pdev->dev,
"Failed to alloc pci device adapter\n");
return -ENOMEM;
}
pci_adapter->pcidev = pdev;
mutex_init(&pci_adapter->pdev_mutex);
pci_set_drvdata(pdev, pci_adapter);
err = pci_enable_device(pdev);
if (err) {
sdk_err(&pdev->dev, "Failed to enable PCI device\n");
goto pci_enable_err;
}
err = pci_request_regions(pdev, HIFC_DRV_NAME);
if (err) {
sdk_err(&pdev->dev, "Failed to request regions\n");
goto pci_regions_err;
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
err = config_pci_dma_mask(pdev);
if (err)
goto dma_mask_err;
return 0;
dma_mask_err:
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_regions_err:
pci_disable_device(pdev);
pci_enable_err:
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
return err;
}
static void hifc_pci_deinit(struct pci_dev *pdev)
{
struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
}
static int hifc_func_init(struct pci_dev *pdev,
struct hifc_pcidev *pci_adapter)
{
struct hifc_init_para init_para;
int err;
init_para.adapter_hdl = pci_adapter;
init_para.pcidev_hdl = pdev;
init_para.dev_hdl = &pdev->dev;
init_para.cfg_reg_base = pci_adapter->cfg_reg_base;
init_para.intr_reg_base = pci_adapter->intr_reg_base;
init_para.db_base = pci_adapter->db_base;
init_para.db_base_phy = pci_adapter->db_base_phy;
init_para.dwqe_mapping = pci_adapter->dwqe_mapping;
init_para.hwdev = &pci_adapter->hwdev;
init_para.chip_node = pci_adapter->chip_node;
init_para.ppf_hwdev = hifc_get_ppf_hwdev_by_pdev(pdev);
err = hifc_init_hwdev(&init_para);
if (err) {
pci_adapter->hwdev = NULL;
sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
return -EFAULT;
}
pci_adapter->init_state = HIFC_INIT_STATE_HWDEV_INITED;
pci_adapter->lld_dev.pdev = pdev;
pci_adapter->lld_dev.hwdev = pci_adapter->hwdev;
hifc_event_register(pci_adapter->hwdev, pci_adapter,
hifc_event_process);
hifc_sync_time_to_fmw(pci_adapter);
lld_lock_chip_node();
err = hifc_dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node);
if (err) {
lld_unlock_chip_node();
sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
hifc_event_unregister(pci_adapter->hwdev);
return err;
}
lld_unlock_chip_node();
pci_adapter->init_state = HIFC_INIT_STATE_DBGTOOL_INITED;
attach_uld(pci_adapter);
sdk_info(&pdev->dev, "Pcie device probed\n");
pci_adapter->init_state = HIFC_INIT_STATE_ALL_INITED;
return 0;
}
static void hifc_func_deinit(struct pci_dev *pdev)
{
struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);
/* During function deinit, first disable events initiated by the mgmt
 * CPU, then flush the mgmt workqueue.
 */
if (pci_adapter->init_state >= HIFC_INIT_STATE_ALL_INITED)
detach_uld(pci_adapter);
hifc_disable_mgmt_msg_report(pci_adapter->hwdev);
if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_PART_INITED)
hifc_flush_mgmt_workq(pci_adapter->hwdev);
hifc_set_func_deinit_flag(pci_adapter->hwdev);
if (pci_adapter->init_state >= HIFC_INIT_STATE_DBGTOOL_INITED) {
lld_lock_chip_node();
hifc_dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node);
lld_unlock_chip_node();
hifc_event_unregister(pci_adapter->hwdev);
}
if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) {
/* Remove the current node from the node list first;
 * then it is safe to free the hwdev
 */
lld_lock_chip_node();
list_del(&pci_adapter->node);
lld_unlock_chip_node();
hifc_free_hwdev(pci_adapter->hwdev);
}
}
static void remove_func(struct hifc_pcidev *pci_adapter)
{
struct pci_dev *pdev = pci_adapter->pcidev;
switch (pci_adapter->init_state) {
case HIFC_INIT_STATE_ALL_INITED:
/*lint -fallthrough*/
case HIFC_INIT_STATE_DBGTOOL_INITED:
case HIFC_INIT_STATE_HWDEV_INITED:
case HIFC_INIT_STATE_HW_PART_INITED:
case HIFC_INIT_STATE_HW_IF_INITED:
case HIFC_INIT_STATE_PCI_INITED:
set_bit(HIFC_FUNC_IN_REMOVE, &pci_adapter->flag);
if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED)
hifc_func_deinit(pdev);
lld_lock_chip_node();
if (pci_adapter->init_state < HIFC_INIT_STATE_HW_IF_INITED)
list_del(&pci_adapter->node);
hifc_tool_k_uninit();
free_chip_node(pci_adapter);
lld_unlock_chip_node();
unmapping_bar(pci_adapter);
hifc_pci_deinit(pdev);
/*lint -fallthrough*/
break;
default:
break;
}
}
static void hifc_hwdev_remove(struct pci_dev *pdev)
{
struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter)
return;
sdk_info(&pdev->dev, "Pcie device remove begin\n");
if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED)
hifc_detect_hw_present(pci_adapter->hwdev);
remove_func(pci_adapter);
sdk_info(&pdev->dev, "Pcie device removed\n");
}
static int hifc_hwdev_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct hifc_pcidev *pci_adapter;
int err;
sdk_info(&pdev->dev, "Pcie device probe begin\n");
err = hifc_pci_init(pdev);
if (err)
return err;
pci_adapter = pci_get_drvdata(pdev);
clear_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag);
clear_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag);
err = mapping_bar(pdev, pci_adapter);
if (err) {
sdk_err(&pdev->dev, "Failed to map bar\n");
goto map_bar_failed;
}
pci_adapter->id = *id;
/* If chip information for this pcie function already exists,
 * add the function to that chip
 */
lld_lock_chip_node();
err = alloc_chip_node(pci_adapter);
if (err) {
sdk_err(&pdev->dev,
"Failed to add new chip node to global list\n");
goto alloc_chip_node_fail;
}
err = hifc_tool_k_init();
if (err) {
sdk_warn(&pdev->dev, "Failed to init nictool");
goto init_nictool_err;
}
list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list);
lld_unlock_chip_node();
pci_adapter->init_state = HIFC_INIT_STATE_PCI_INITED;
err = hifc_func_init(pdev, pci_adapter);
if (err)
goto func_init_err;
return 0;
func_init_err:
if (!test_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag))
set_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag);
return 0;
init_nictool_err:
free_chip_node(pci_adapter);
alloc_chip_node_fail:
lld_unlock_chip_node();
unmapping_bar(pci_adapter);
map_bar_failed:
hifc_pci_deinit(pdev);
sdk_err(&pdev->dev, "Pcie device probe failed\n");
return err;
}
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define HIFC_DEV_ID_1822_8G 0x0212
#define HIFC_DEV_ID_1822_16G 0x0203
#define HIFC_DEV_ID_1822_32G 0x0202
/*lint -save -e133 -e10*/
static const struct pci_device_id hifc_pci_table[] = {
{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_8G), 0},
{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_16G), 0},
{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_32G), 0},
{0, 0}
};
/*lint -restore*/
MODULE_DEVICE_TABLE(pci, hifc_pci_table);
static void hifc_shutdown(struct pci_dev *pdev)
{
sdk_err(&pdev->dev, "Shutdown device\n");
pci_disable_device(pdev);
}
static struct pci_driver hifc_driver = {
.name = HIFC_DRV_NAME,
.id_table = hifc_pci_table,
.probe = hifc_hwdev_probe,
.remove = hifc_hwdev_remove,
.shutdown = hifc_shutdown,
};
extern int hifc_init_module(void);
extern void hifc_exit_module(void);
static int __init hifc_lld_init(void)
{
pr_info("%s - version %s\n", HIFC_DRV_DESC, HIFC_DRV_VERSION);
hifc_lld_lock_init();
hifc_init_module();
return pci_register_driver(&hifc_driver);
}
static void __exit hifc_lld_exit(void)
{
pci_unregister_driver(&hifc_driver);
hifc_exit_module();
}
module_init(hifc_lld_init);
module_exit(hifc_lld_exit);

79
hifc/hifc_lld.h Normal file

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_LLD_H_
#define HIFC_LLD_H_
#include "unf_common.h"
#define HIFC_PCI_VENDOR_ID (0x19e5)
#define HIFC_DRV_NAME "hifc_sdk"
#define HIFC_CHIP_NAME "hifc"
#define HIFC_DRV_VERSION UNF_FC_VERSION
struct hifc_lld_dev {
struct pci_dev *pdev;
void *hwdev;
};
extern struct list_head g_hifc_chip_list;
/* pcidev private structure */
struct hifc_pcidev {
struct pci_dev *pcidev;
void *hwdev;
struct card_node *chip_node;
struct hifc_lld_dev lld_dev;
/* Record the service object address,
 * such as hifc_dev, toe_dev or fc_dev
 */
void *uld_dev;
/* Record the service object name */
char uld_dev_name[IFNAMSIZ];
/* Node in the global list that the driver uses to manage
 * all function devices
 */
struct list_head node;
void __iomem *cfg_reg_base;
void __iomem *intr_reg_base;
u64 db_base_phy;
void __iomem *db_base;
#if defined(__aarch64__)
void __iomem *dwqe_mapping;
#else
struct io_mapping *dwqe_mapping;
#endif
/* lock for attach/detach uld */
struct mutex pdev_mutex;
u32 init_state;
/* set while the uld driver is processing an event */
unsigned long state;
struct pci_device_id id;
unsigned long flag;
};
enum {
HIFC_FUNC_IN_REMOVE = BIT(0),
HIFC_FUNC_PRB_ERR = BIT(1),
HIFC_FUNC_PRB_DELAY = BIT(2),
};
enum hifc_init_state {
HIFC_INIT_STATE_NONE,
HIFC_INIT_STATE_PCI_INITED,
HIFC_INIT_STATE_HW_IF_INITED,
HIFC_INIT_STATE_HW_PART_INITED,
HIFC_INIT_STATE_HWDEV_INITED,
HIFC_INIT_STATE_DBGTOOL_INITED,
HIFC_INIT_STATE_ALL_INITED,
};
void lld_dev_put(void);
void lld_dev_hold(void);
#endif

1425
hifc/hifc_mgmt.c Normal file

File diff suppressed because it is too large

407
hifc/hifc_mgmt.h Normal file

@ -0,0 +1,407 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_MGMT_H_
#define HIFC_MGMT_H_
#define HIFC_MSG_HEADER_MSG_LEN_SHIFT 0
#define HIFC_MSG_HEADER_MODULE_SHIFT 11
#define HIFC_MSG_HEADER_SEG_LEN_SHIFT 16
#define HIFC_MSG_HEADER_NO_ACK_SHIFT 22
#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
#define HIFC_MSG_HEADER_SEQID_SHIFT 24
#define HIFC_MSG_HEADER_LAST_SHIFT 30
#define HIFC_MSG_HEADER_DIRECTION_SHIFT 31
#define HIFC_MSG_HEADER_CMD_SHIFT 32
#define HIFC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
#define HIFC_MSG_HEADER_P2P_IDX_SHIFT 50
#define HIFC_MSG_HEADER_MSG_ID_SHIFT 54
#define HIFC_MSG_HEADER_MSG_LEN_MASK 0x7FF
#define HIFC_MSG_HEADER_MODULE_MASK 0x1F
#define HIFC_MSG_HEADER_SEG_LEN_MASK 0x3F
#define HIFC_MSG_HEADER_NO_ACK_MASK 0x1
#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
#define HIFC_MSG_HEADER_SEQID_MASK 0x3F
#define HIFC_MSG_HEADER_LAST_MASK 0x1
#define HIFC_MSG_HEADER_DIRECTION_MASK 0x1
#define HIFC_MSG_HEADER_CMD_MASK 0xFF
#define HIFC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
#define HIFC_MSG_HEADER_P2P_IDX_MASK 0xF
#define HIFC_MSG_HEADER_MSG_ID_MASK 0x3FF
#define HIFC_MSG_HEADER_GET(val, member) \
(((val) >> HIFC_MSG_HEADER_##member##_SHIFT) & \
HIFC_MSG_HEADER_##member##_MASK)
#define HIFC_MSG_HEADER_SET(val, member) \
((u64)((val) & HIFC_MSG_HEADER_##member##_MASK) << \
HIFC_MSG_HEADER_##member##_SHIFT)
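/* Illustrative sketch (an assumption; field values are placeholders): the
 * 64-bit message header is assembled and decoded with the macro pair above.
 *
 *	u64 header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) |
 *		     HIFC_MSG_HEADER_SET(mod, MODULE) |
 *		     HIFC_MSG_HEADER_SET(HIFC_MSG_DIRECT_SEND, DIRECTION) |
 *		     HIFC_MSG_HEADER_SET(cmd, CMD) |
 *		     HIFC_MSG_HEADER_SET(msg_id, MSG_ID);
 *
 *	u8 hdr_cmd = HIFC_MSG_HEADER_GET(header, CMD);
 *	u16 hdr_len = HIFC_MSG_HEADER_GET(header, MSG_LEN);
 */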
#define HIFC_MGMT_WQ_NAME "hifc_mgmt"
/* CLP */
enum clp_data_type {
HIFC_CLP_REQ_HOST = 0,
HIFC_CLP_RSP_HOST = 1
};
enum clp_reg_type {
HIFC_CLP_BA_HOST = 0,
HIFC_CLP_SIZE_HOST = 1,
HIFC_CLP_LEN_HOST = 2,
HIFC_CLP_START_REQ_HOST = 3,
HIFC_CLP_READY_RSP_HOST = 4
};
/* cmd of mgmt CPU message for HW module */
enum hifc_mgmt_cmd {
HIFC_MGMT_CMD_RESET_MGMT = 0x0,
HIFC_MGMT_CMD_START_FLR = 0x1,
HIFC_MGMT_CMD_FLUSH_DOORBELL = 0x2,
HIFC_MGMT_CMD_CMDQ_CTXT_SET = 0x10,
HIFC_MGMT_CMD_VAT_SET = 0x12,
HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14,
HIFC_MGMT_CMD_PPF_TMR_SET = 0x22,
HIFC_MGMT_CMD_PPF_HT_GPA_SET = 0x23,
HIFC_MGMT_CMD_RES_STATE_SET = 0x24,
HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32,
HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
HIFC_MGMT_CMD_FAULT_REPORT = 0x37,
HIFC_MGMT_CMD_HEART_LOST_REPORT = 0x38,
HIFC_MGMT_CMD_SYNC_TIME = 0x46,
HIFC_MGMT_CMD_REG_READ = 0x48,
HIFC_MGMT_CMD_L2NIC_RESET = 0x4b,
HIFC_MGMT_CMD_ACTIVATE_FW = 0x4F,
HIFC_MGMT_CMD_PAGESIZE_SET = 0x50,
HIFC_MGMT_CMD_GET_BOARD_INFO = 0x52,
HIFC_MGMT_CMD_WATCHDOG_INFO = 0x56,
HIFC_MGMT_CMD_FMW_ACT_NTC = 0x57,
HIFC_MGMT_CMD_PCIE_DFX_NTC = 0x65,
HIFC_MGMT_CMD_PCIE_DFX_GET = 0x66,
HIFC_MGMT_CMD_GET_HOST_INFO = 0x67,
HIFC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A,
HIFC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C,
};
#define HIFC_CLP_REG_GAP 0x20
#define HIFC_CLP_INPUT_BUFFER_LEN_HOST 2048UL
#define HIFC_CLP_OUTPUT_BUFFER_LEN_HOST 2048UL
#define HIFC_CLP_DATA_UNIT_HOST 4UL
#define HIFC_BAR01_GLOABAL_CTL_OFFSET 0x4000
#define HIFC_BAR01_CLP_OFFSET 0x5000
#define HIFC_CLP_SRAM_SIZE_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x220)
#define HIFC_CLP_REQ_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x224)
#define HIFC_CLP_RSP_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x228)
#define HIFC_CLP_REQ_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x22c)
#define HIFC_CLP_RSP_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x230)
#define HIFC_CLP_REG(member) (HIFC_CLP_##member##_REG)
#define HIFC_CLP_REQ_DATA (HIFC_BAR01_CLP_OFFSET)
#define HIFC_CLP_RSP_DATA (HIFC_BAR01_CLP_OFFSET + 0x1000)
#define HIFC_CLP_DATA(member) (HIFC_CLP_##member##_DATA)
#define HIFC_CLP_SRAM_SIZE_OFFSET 16
#define HIFC_CLP_SRAM_BASE_OFFSET 0
#define HIFC_CLP_LEN_OFFSET 0
#define HIFC_CLP_START_OFFSET 31
#define HIFC_CLP_READY_OFFSET 31
#define HIFC_CLP_OFFSET(member) (HIFC_CLP_##member##_OFFSET)
#define HIFC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL
#define HIFC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL
#define HIFC_CLP_LEN_BIT_LEN 0x7ffUL
#define HIFC_CLP_START_BIT_LEN 0x1UL
#define HIFC_CLP_READY_BIT_LEN 0x1UL
#define HIFC_CLP_MASK(member) (HIFC_CLP_##member##_BIT_LEN)
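/* Illustrative sketch (an assumption): reading the CLP response-ready bit
 * with the REG/OFFSET/MASK macro family above.
 *
 *	u32 val = hifc_hwif_read_reg(hwif, HIFC_CLP_REG(RSP));
 *	u32 ready = (val >> HIFC_CLP_OFFSET(READY)) & HIFC_CLP_MASK(READY);
 */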
#define HIFC_CLP_DELAY_CNT_MAX 200UL
#define HIFC_CLP_SRAM_SIZE_REG_MAX 0x3ff
#define HIFC_CLP_SRAM_BASE_REG_MAX 0x7ffffff
#define HIFC_CLP_LEN_REG_MAX 0x3ff
#define HIFC_CLP_START_OR_READY_REG_MAX 0x1
#define HIFC_MGMT_CMD_UNSUPPORTED 0xFF
enum hifc_msg_direction_type {
HIFC_MSG_DIRECT_SEND = 0,
HIFC_MSG_RESPONSE = 1
};
enum hifc_msg_segment_type {
NOT_LAST_SEGMENT = 0,
LAST_SEGMENT = 1,
};
enum hifc_mgmt_msg_type {
ASYNC_MGMT_MSG = 0,
SYNC_MGMT_MSG = 1,
};
enum hifc_msg_ack_type {
HIFC_MSG_ACK = 0,
HIFC_MSG_NO_ACK = 1,
};
struct hifc_recv_msg {
void *msg;
struct completion recv_done;
u16 msg_len;
enum hifc_mod_type mod;
u8 cmd;
u8 seq_id;
u16 msg_id;
int async_mgmt_to_pf;
};
struct hifc_msg_head {
u8 status;
u8 version;
u8 resp_aeq_num;
u8 rsvd0[5];
};
#define HIFC_COMM_SELF_CMD_MAX 8
struct comm_up_self_msg_sub_info {
u8 cmd;
comm_up_self_msg_proc proc;
};
struct comm_up_self_msg_info {
u8 cmd_num;
struct comm_up_self_msg_sub_info info[HIFC_COMM_SELF_CMD_MAX];
};
enum comm_pf_to_mgmt_event_state {
SEND_EVENT_UNINIT = 0,
SEND_EVENT_START,
SEND_EVENT_FAIL,
SEND_EVENT_TIMEOUT,
SEND_EVENT_END,
};
enum hifc_mgmt_msg_cb_state {
HIFC_MGMT_MSG_CB_REG = 0,
HIFC_MGMT_MSG_CB_RUNNING,
};
struct hifc_clp_pf_to_mgmt {
struct semaphore clp_msg_lock;
void *clp_msg_buf;
};
struct hifc_msg_pf_to_mgmt {
struct hifc_hwdev *hwdev;
/* Async cmds cannot be scheduled */
spinlock_t async_msg_lock;
struct semaphore sync_msg_lock;
struct workqueue_struct *workq;
void *async_msg_buf;
void *sync_msg_buf;
void *mgmt_ack_buf;
struct hifc_recv_msg recv_msg_from_mgmt;
struct hifc_recv_msg recv_resp_msg_from_mgmt;
u16 async_msg_id;
u16 sync_msg_id;
struct hifc_api_cmd_chain *cmd_chain[HIFC_API_CMD_MAX];
hifc_mgmt_msg_cb recv_mgmt_msg_cb[HIFC_MOD_HW_MAX];
void *recv_mgmt_msg_data[HIFC_MOD_HW_MAX];
unsigned long mgmt_msg_cb_state[HIFC_MOD_HW_MAX];
struct comm_up_self_msg_info proc;
/* lock when sending msg */
spinlock_t sync_event_lock;
enum comm_pf_to_mgmt_event_state event_flag;
};
struct hifc_mgmt_msg_handle_work {
struct work_struct work;
struct hifc_msg_pf_to_mgmt *pf_to_mgmt;
void *msg;
u16 msg_len;
enum hifc_mod_type mod;
u8 cmd;
u16 msg_id;
int async_mgmt_to_pf;
};
/* Only service-specific capabilities such as the nic_service_cap and
 * toe_service_cap structures are exposed to each driver; service_cap
 * itself is not shown
 */
enum hifc_service_type {
SERVICE_T_NIC = 0,
SERVICE_T_FC = 5,
SERVICE_T_MAX,
/* Only used for interrupt resource management,
 * to mark the requesting module
 */
SERVICE_T_INTF = (1 << 15),
SERVICE_T_CQM = (1 << 16),
};
/* NIC service capability
 * 1. The chip supports up to 1K NIC RQs.
 * 2. PF/VF RQ specifications:
 *    RSS disabled:
 *        VMDq disabled: each PF/VF has at most 8 RQs
 *        VMDq enabled:  each PF/VF has at most 1K RQs
 *    RSS enabled:
 *        VMDq disabled: each PF has at most 64 RQs, each VF at most 32 RQs
 *        VMDq enabled:  each PF/VF has at most 1K RQs
 * 3. The chip supports up to 1K NIC SQs.
 * 4. PF/VF SQ specifications:
 *    RSS disabled:
 *        VMDq disabled: each PF/VF has at most 8 SQs
 *        VMDq enabled:  each PF/VF has at most 1K SQs
 *    RSS enabled:
 *        VMDq disabled: each PF has at most 64 SQs, each VF at most 32 SQs
 *        VMDq enabled:  each PF/VF has at most 1K SQs
 */
struct nic_service_cap {
/* PF resources */
u16 max_sqs;
u16 max_rqs;
/* VF resources, obtained by the VF from its PF through the
 * mailbox mechanism
 */
u16 vf_max_sqs;
u16 vf_max_rqs;
bool lro_en; /* LRO feature enable bit */
u8 lro_sz; /* LRO context space: n*16B */
u8 tso_sz; /* TSO context space: n*16B */
u16 max_queue_allowed;
};
/* PF FC service resource structure definition */
struct dev_fc_svc_cap {
/* PF Parent QPC */
u32 max_parent_qpc_num; /* max number is 2048 */
/* PF Child QPC */
u32 max_child_qpc_num; /* max number is 2048 */
/* PF SCQ */
u32 scq_num; /* 16 */
/* PF supports SRQ */
u32 srq_num; /* number of SRQs is 2 */
u8 vp_id_start;
u8 vp_id_end;
};
/* FC services */
struct fc_service_cap {
struct dev_fc_svc_cap dev_fc_cap;
/* Parent QPC */
u32 parent_qpc_size; /* 256B */
/* Child QPC */
u32 child_qpc_size; /* 256B */
/* SQ */
u32 sqe_size; /* 128B (in linked-list mode) */
/* SCQ */
u32 scqc_size; /* size of the context, 32B */
u32 scqe_size; /* 64B */
/* SRQ */
u32 srqc_size; /* size of the SRQ context, 64B */
u32 srqe_size; /* 32B */
};
bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap);
/* Service interfaces for obtaining public service_cap fields */
/* Obtain service_cap.host_oq_id_mask_val */
u8 hifc_host_oq_id_mask(void *hwdev);
/* Obtain service_cap.dev_cap.max_sqs */
u16 hifc_func_max_qnum(void *hwdev);
/* The following information is obtained from the BAR space recorded by
 * the SDK layer; these interfaces let services query the parameters
 */
/* func_attr.glb_func_idx, global function index */
u16 hifc_global_func_id(void *hwdev);
/* func_attr.intr_num, MSI-X table entries in the function */
enum intr_type {
INTR_TYPE_MSIX,
INTR_TYPE_MSI,
INTR_TYPE_INT,
INTR_TYPE_NONE,
};
u8 hifc_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */
/* func_attr.func_type, 0-PF 1-VF 2-PPF */
enum func_type hifc_func_type(void *hwdev);
u8 hifc_ppf_idx(void *hwdev);
enum hifc_msix_state {
HIFC_MSIX_ENABLE,
HIFC_MSIX_DISABLE,
};
void hifc_set_msix_state(void *hwdev, u16 msix_idx,
enum hifc_msix_state flag);
/* IRQ information structure */
struct irq_info {
u16 msix_entry_idx; /* IRQ corresponding index number */
u32 irq_id; /* the IRQ number from OS */
};
int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 req_num,
struct irq_info *irq_info_array, u16 *resp_num);
void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id);
int hifc_sync_time(void *hwdev, u64 time);
void hifc_disable_mgmt_msg_report(void *hwdev);
void hifc_set_func_deinit_flag(void *hwdev);
void hifc_flush_mgmt_workq(void *hwdev);
int hifc_global_func_id_get(void *hwdev, u16 *func_id);
u16 hifc_global_func_id_hw(void *hwdev);
int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size);
void hifc_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size);
int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev);
void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev);
int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size, u32 timeout);
int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd,
void *buf_in, u16 in_size);
int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd,
const void *buf_in, u16 in_size,
void *buf_out, u16 *out_size);
int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev);
void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev);
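/* Illustrative sketch (an assumption; HIFC_MOD_COMM and the req/rsp
 * buffers are hypothetical placeholders): a synchronous management
 * command with a 5 s timeout using the interface above.
 *
 *	u16 out_size = sizeof(rsp);
 *	int err = hifc_pf_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
 *				       HIFC_MGMT_CMD_GET_BOARD_INFO,
 *				       &req, sizeof(req),
 *				       &rsp, &out_size, 5000);
 */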
#endif

103
hifc/hifc_module.c Normal file

@ -0,0 +1,103 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
struct unf_cm_handle_op_s hifc_cm_handle;
unsigned int dif_sgl_mode;
unsigned int max_speed = HIFC_SPEED_32G;
unsigned int accum_db_num = 1;
unsigned int dif_type = 0x1;
unsigned int wqe_page_size = 4096;
unsigned int wqe_pre_load = 6;
unsigned int combo_length_kb = 8;
unsigned int cos_bit_map = 0x1f;
unsigned int hifc_dif_type;
unsigned int hifc_dif_enable;
unsigned char hifc_guard;
/* dfx counter */
atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT];
unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT];
unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT];
atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
static void hifc_release_com_op_handle(void)
{
memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s));
}
static void hifc_check_module_para(void)
{
if (dif_sgl_mode != 0)
dif_sgl_mode = 1;
}
int hifc_init_module(void)
{
int ret = RETURN_OK;
ret = unf_common_init();
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]unf_common_init failed");
return RETURN_ERROR_S32;
}
memset(rx_tx_stat, 0, sizeof(rx_tx_stat));
memset(rx_tx_err, 0, sizeof(rx_tx_err));
memset(scq_err_stat, 0, sizeof(scq_err_stat));
memset(aeq_err_stat, 0, sizeof(aeq_err_stat));
memset(dif_err_stat, 0, sizeof(dif_err_stat));
memset(link_event_stat, 0, sizeof(link_event_stat));
memset(link_reason_stat, 0, sizeof(link_reason_stat));
memset(hba_stat, 0, sizeof(hba_stat));
memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s));
memset(up_err_event_stat, 0, sizeof(up_err_event_stat));
memset(mail_box_stat, 0, sizeof(mail_box_stat));
memset(hifc_hba, 0, sizeof(hifc_hba));
spin_lock_init(&probe_spin_lock);
/* Check module parameters */
hifc_check_module_para();
/* Get COM handlers used by the low-level driver */
if (unf_get_cm_handle_op(&hifc_cm_handle) != RETURN_OK) {
hifc_release_com_op_handle();
return RETURN_ERROR_S32;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Init HIFC module succeed");
return ret;
}
void hifc_exit_module(void)
{
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[event]HIFC module removing...");
hifc_release_com_op_handle();
/* Unregister the FC COM module */
unf_common_exit();
}
module_param(dif_sgl_mode, uint, 0444);
module_param(max_speed, uint, 0444);
module_param(wqe_page_size, uint, 0444);
module_param(combo_length_kb, uint, 0444);
module_param(cos_bit_map, uint, 0444);

289
hifc/hifc_module.h Normal file

@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_MODULE_H__
#define __HIFC_MODULE_H__
#include "unf_log.h"
#include "unf_common.h"
#include "hifc_utils.h"
#include "hifc_hba.h"
#define HIFC_SPEED_16G 0x10
#define HIFC_SPEED_32G 0x20
#define HIFC_MAX_PORT_NUM HIFC_MAX_PROBE_PORT_NUM
#define HIFC_TASK_TYPE_STAT_NUM 128
#define HIFC_MAX_LINK_EVENT_CNT 4
#define HIFC_MAX_LINK_REASON_CNT 256
/* Declare the global function. */
extern struct unf_cm_handle_op_s hifc_cm_handle;
extern unsigned int max_speed;
extern unsigned int accum_db_num;
extern unsigned int wqe_page_size;
extern unsigned int dif_type;
extern unsigned int wqe_pre_load;
extern unsigned int combo_length_kb;
extern unsigned int cos_bit_map;
extern atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT];
extern unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT];
extern atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM];
extern unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT];
#define HIFC_LINK_EVENT_STAT(v_hba, link_ent) \
(link_event_stat[(v_hba)->probe_index][link_ent]++)
#define HIFC_LINK_REASON_STAT(v_hba, link_rsn) \
(link_reason_stat[(v_hba)->probe_index][link_rsn]++)
#define HIFC_HBA_STAT(v_hba, hba_stat_type) \
(hba_stat[(v_hba)->probe_index][hba_stat_type]++)
#define HIFC_UP_ERR_EVENT_STAT(v_hba, err_type) \
(atomic64_inc(&up_err_event_stat[(v_hba)->probe_index][err_type]))
#define HIFC_UP_ERR_EVENT_STAT_READ(probe_index, io_type) \
(atomic64_read(&up_err_event_stat[probe_index][io_type]))
#define HIFC_DIF_ERR_STAT(v_hba, dif_err) \
(atomic64_inc(&dif_err_stat[(v_hba)->probe_index][dif_err]))
#define HIFC_DIF_ERR_STAT_READ(probe_index, dif_err) \
(atomic64_read(&dif_err_stat[probe_index][dif_err]))
#define HIFC_IO_STAT(v_hba, io_type) \
(atomic64_inc(&rx_tx_stat[(v_hba)->probe_index][io_type]))
#define HIFC_IO_STAT_READ(probe_index, io_type) \
(atomic64_read(&rx_tx_stat[probe_index][io_type]))
#define HIFC_ERR_IO_STAT(v_hba, io_type) \
(atomic64_inc(&rx_tx_err[(v_hba)->probe_index][io_type]))
#define HIFC_ERR_IO_STAT_READ(probe_index, io_type) \
(atomic64_read(&rx_tx_err[probe_index][io_type]))
#define HIFC_SCQ_ERR_TYPE_STAT(v_hba, err_type) \
(atomic64_inc(&scq_err_stat[(v_hba)->probe_index][err_type]))
#define HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, io_type) \
(atomic64_read(&scq_err_stat[probe_index][io_type]))
#define HIFC_AEQ_ERR_TYPE_STAT(v_hba, err_type) \
(atomic64_inc(&aeq_err_stat[(v_hba)->probe_index][err_type]))
#define HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, io_type) \
(atomic64_read(&aeq_err_stat[probe_index][io_type]))
#define HIFC_MAILBOX_STAT(v_hba, io_type) \
(atomic64_inc(&mail_box_stat[(v_hba)->probe_index][io_type]))
#define HIFC_COM_UP_ERR_EVENT_STAT(v_hba, err_type) \
(atomic64_inc(&com_up_event_err_stat[(v_hba)->probe_index][err_type]))
#define HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, err_type) \
(atomic64_read(&com_up_event_err_stat[probe_index][err_type]))
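/* Illustrative usage (an assumption; task_type is a placeholder index):
 * bump a per-port counter on the I/O path, then read it back from a
 * dfx/query path.
 *
 *	HIFC_IO_STAT(v_hba, task_type);
 *	value = HIFC_IO_STAT_READ(probe_index, task_type);
 */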
/*
 *----------------------------------------------
 * Function-wrapper macros
 *----------------------------------------------
 */
#define UNF_LOWLEVEL_ALLOC_LPORT(v_lport, fc_port, stLowLevel)\
do {\
if (hifc_cm_handle.pfn_unf_alloc_local_port) { \
v_lport = \
hifc_cm_handle.pfn_unf_alloc_local_port((fc_port), \
(stLowLevel));\
} else { \
v_lport = NULL; \
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_ELS_PKG(v_ret, fc_port, pkg) \
do { \
if (hifc_cm_handle.pfn_unf_receive_els_pkg) {\
v_ret =\
hifc_cm_handle.pfn_unf_receive_els_pkg(\
(fc_port), (pkg));\
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_SEND_ELS_DONE(v_ret, fc_port, pkg) \
do { \
if (hifc_cm_handle.pfn_unf_send_els_done) {\
v_ret = hifc_cm_handle.pfn_unf_send_els_done((fc_port),\
(pkg)); \
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_GS_PKG(v_ret, fc_port, pkg)\
do { \
if (hifc_cm_handle.pfn_unf_receive_gs_pkg) {\
v_ret = hifc_cm_handle.pfn_unf_receive_gs_pkg(\
(fc_port),\
(pkg)); \
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_GET_CFG_PARMS(v_ret, \
v_section_name, \
v_cfg_parm, \
v_cfg_value, \
v_item_num) \
do { \
if (hifc_cm_handle.pfn_unf_get_cfg_parms) { \
v_ret = (unsigned int)\
hifc_cm_handle.pfn_unf_get_cfg_parms(\
(v_section_name), \
(v_cfg_parm), \
(v_cfg_value), \
(v_item_num)); \
} else { \
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,\
UNF_WARN,\
"Get config parameter function is NULL.");\
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_RELEASE_LOCAL_PORT(v_ret, lport) \
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_release_local_port)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret =\
hifc_cm_handle.pfn_unf_release_local_port(\
(lport));\
} \
} while (0)
#define UNF_LOWLEVEL_TO_CM_HINICADM(v_ret, lport, pkg) \
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_ioctl_to_com_handler)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_ioctl_to_com_handler(\
lport, pkg); \
} \
} while (0)
#define UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len) \
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_sgl_entry)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_cm_get_sgl_entry(\
pkg, v_buf, v_buf_len);\
} \
} while (0)
#define UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len)\
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry(\
pkg,\
v_buf,\
v_buf_len);\
} \
} while (0)
#define UNF_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len, v_dif_flag) \
do { \
if (v_dif_flag) { \
UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\
} else { \
UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\
} \
} while (0)
#define UNF_GET_FREE_ESGL_PAGE(v_ret, lport, pkg) \
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_get_one_free_esgl_page)) {\
v_ret = NULL; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_get_one_free_esgl_page(\
lport, pkg); \
} \
} while (0)
#define UNF_LOWLEVEL_SCSI_COMPLETED(v_ret, lport, pkg) \
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_receive_ini_rsponse)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_receive_ini_rsponse(\
lport, pkg);\
} \
} while (0)
#define UNF_LOWLEVEL_PORT_EVENT(v_ret, lport, v_events, v_input)\
do { \
if (unlikely(!hifc_cm_handle.pfn_unf_fc_port_link_event)) {\
v_ret = UNF_RETURN_ERROR; \
} else { \
v_ret = hifc_cm_handle.pfn_unf_fc_port_link_event(\
lport, v_events, v_input);\
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_FC4LS_PKG(v_ret, fc_port, pkg)\
do { \
if (hifc_cm_handle.pfn_unf_receive_fc4_pkg) {\
v_ret = hifc_cm_handle.pfn_unf_receive_fc4_pkg(\
(fc_port), (pkg));\
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_SEND_FC4LS_DONE(v_ret, lport, pkg) \
do { \
if (hifc_cm_handle.pfn_unf_send_fc4_done) {\
v_ret = hifc_cm_handle.pfn_unf_send_fc4_done(\
(lport), (pkg));\
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_BLS_PKG(v_ret, lport, pkg) \
do { \
if (hifc_cm_handle.pfn_unf_receive_bls_pkg) {\
v_ret = hifc_cm_handle.pfn_unf_receive_bls_pkg(\
(lport), (pkg)); \
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_MARKER_STS(v_ret, lport, pkg)\
do { \
if (hifc_cm_handle.pfn_unf_receive_marker_status) {\
v_ret = hifc_cm_handle.pfn_unf_receive_marker_status(\
(lport), (pkg));\
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
#define UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(v_ret, lport, pkg) \
do { \
if (hifc_cm_handle.pfn_unf_receive_abts_marker_status) {\
v_ret =\
hifc_cm_handle.pfn_unf_receive_abts_marker_status(\
(lport), (pkg));\
} else { \
v_ret = UNF_RETURN_ERROR; \
} \
} while (0)
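/* Illustrative usage (an assumption; task_type is hypothetical): reporting
 * a completed initiator I/O up to the COM layer through the wrapper, which
 * degrades gracefully when no handler was registered.
 *
 *	unsigned int ret;
 *
 *	UNF_LOWLEVEL_SCSI_COMPLETED(ret, lport, pkg);
 *	if (ret != RETURN_OK)
 *		HIFC_ERR_IO_STAT(v_hba, task_type);
 */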
#endif

1272
hifc/hifc_portmng.c Normal file

File diff suppressed because it is too large

223
hifc/hifc_portmng.h Normal file

@ -0,0 +1,223 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_PORTMNG_H__
#define __HIFC_PORTMNG_H__
#include "unf_common.h"
#include "hifc_module.h"
#include "hifc_hba.h"
#define HIFC_PORT_INFO_SIZE 10
#define HIFC_DFX_BACK_INFO_SIZE 406
#define HIFC_DFX_BACK_INFO_SIZE64 203
#define HIFC_GET_DRIVER_VERSION 16
#define HIFC_SET_BBSCN_VALUE 0
#define HIFC_QUERY_BBSCN_VALUE 1
#define HIFC_QUERY_FEC_MODE 2
#define FC_DFX_SEND_INFO_SIZE 5
#define FC_DFX_BACK_INFO_64 203
#define FC_DFX_BACK_INFO_32 406
#define FC_DFX_MAX_IO_RETURN_VALUE 0x12
#define FC_DFX_MAX_SCSI_CMD 0xFF
#define FC_DFX_SCSI_CMD_FIRST_GET 100
struct unf_adm_dfx_session_state {
unsigned char session1 : 4;
unsigned char session2 : 4;
};
struct session_counter_s {
u64 target_busy;
u64 host_busy;
u64 remote_port_wwpn;
u64 local_port_wwpn;
u32 device_alloc;
u32 device_destroy;
u32 scsi_state;
u32 remote_port_nportid;
u32 remote_port_state;
u32 remote_port_scsiid;
u32 remote_port_index;
u32 local_port_nportid;
u32 local_port_ini_state;
u32 local_port_state;
u32 port_id;
u32 host_id;
u32 target_id;
u32 abort_io;
u32 device_reset;
u32 target_reset;
u32 bus_reset;
u32 virtual_reset;
u32 abort_io_result;
u32 device_reset_result;
u32 target_reset_result;
u32 bus_reset_result;
u32 virtual_reset_result;
};
enum hifc_adm_msg_status_e {
HIFC_ADM_MSG_DONE = 0,
HIFC_ADM_MSG_INCOMPLETE,
HIFC_ADM_MSG_FAILED,
HIFC_ADM_MSG_BUTT
};
struct hifc_port_diag_op_s {
enum unf_port_diag_op_e op_code;
unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para);
};
enum hifc_adm_dfx_mod_e {
/* HBA WQE and SCQE statistic */
HIFC_TX_RX_STATE_COUNTER = 0,
/* TX and RX error counter, HBA counter */
HIFC_TX_RX_ERROR_STATE_COUNTER,
/* SCQ, AEQ, uP, common uP error counter */
HIFC_ERROR_STATE_COUNTER,
/* Link state counter */
HIFC_LINK_STATE_COUNTER,
/* Host counter */
HIFC_HOST_COUNTER,
/* session counter */
HIFC_SESSION_COUNTER,
/* DIF error counter */
HIFC_DIF_ERROR_COUNTER,
HIFC_ALL_DFX_TYPE = 50,
};
enum hifc_msg_format_e {
HIFC_DFX = 7,
HIFC_FEC_SET,
HIFC_BBSCN,
HIFC_PORTSTAT = 24,
HIFC_ALL_INFO_OP = 25,
HIFC_COMPAT_TEST = 0xFF
};
struct hifc_adm_msg_head_s {
unsigned int size;
unsigned short status;
unsigned short rsvd;
};
/* port state for fc_portstat */
struct hifc_adm_port_state {
unsigned int port_id;
unsigned int rport_num;
unsigned int init;
unsigned int offloading;
unsigned int offloaded;
unsigned int destroying;
};
/* SQ & IoStat for fc_portstat */
struct hifc_adm_sq {
unsigned int sq_id;
unsigned int rport_index;
unsigned int xid;
unsigned int cid;
unsigned int sid;
unsigned int did;
unsigned int vpid;
unsigned int cmd_local_queue_id;
unsigned int cmd_cqm_queue_id;
unsigned int sts_local_queue_id;
unsigned int sts_cqm_queue_id;
unsigned int cos;
unsigned int off_load;
unsigned int cmsn;
unsigned int pmsn;
unsigned int db_cnt;
unsigned int sqe_cnt;
unsigned int cqe_cnt;
unsigned int in_sq_cnt;
unsigned int in_chip_cnt;
};
/* hifcadm fc_portstat struct, used to show ListSqinfo from mml */
struct hifc_adm_lsq_info_s {
struct hifc_adm_msg_head_s msg_head;
unsigned int cmd[HIFC_PORT_INFO_SIZE];
struct hifc_adm_port_state port_state;
struct hifc_adm_sq sq;
unsigned int mark;
};
struct unf_adm_dfx_host_counter_s {
unsigned int host_num;
unsigned int port_id;
unsigned int scsi_session_add_success;
unsigned int scsi_session_add_failed;
unsigned int scsi_session_del_success;
unsigned int scsi_session_del_failed;
unsigned int device_alloc;
unsigned int device_destroy;
unsigned int session_loss_tmo;
unsigned int alloc_scsi_id;
unsigned int reuse_scsi_id;
unsigned int resume_scsi_id;
unsigned int add_start_work_failed;
unsigned int add_closing_work_failed;
unsigned int abort_io;
unsigned int device_reset;
unsigned int target_reset;
unsigned int bus_reset;
unsigned int virtual_reset;
unsigned int abort_io_result;
unsigned int device_reset_result;
unsigned int target_reset_result;
unsigned int bus_reset_result;
unsigned int virtual_reset_result;
struct unf_adm_dfx_session_state session_state[1024];
};
/* hifcadm fc_port struct */
struct hifc_adm_cmd_s {
struct hifc_adm_msg_head_s msg_head;
unsigned int cmd[HIFC_PORT_INFO_SIZE];
};
/* hifcadm fc_dfx struct */
struct hifc_adm_dfx_cmd_s {
struct hifc_adm_msg_head_s msg_head;
unsigned int cmd[HIFC_PORT_INFO_SIZE];
union {
unsigned long long result[HIFC_DFX_BACK_INFO_SIZE64];
struct unf_adm_dfx_host_counter_s host_cnt;
struct session_counter_s session_cnt;
unsigned long long scsi_cmd_in;
unsigned long long scsi_cmd_done;
unsigned long long target_busy;
unsigned long long host_busy;
} unresult;
};
unsigned int hifc_port_diagnose(void *v_hba, enum unf_port_diag_op_e op_code,
void *v_para);
unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_bbscn(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_state(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_fcp_conf(void *v_hba, void *v_para_in);
unsigned int hifc_set_loop_role(void *v_hba, void *v_para_in);
unsigned int hifc_set_max_support_speed(void *v_hba, void *v_para_in);
unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para);
int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin,
unsigned int in_size, void *buff_out, unsigned int *out_size);
unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out);
int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out);
int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out);
unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear);
unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in);
#endif /* __HIFC_PORTMNG_H__ */

7019
hifc/hifc_queue.c Normal file

File diff suppressed because it is too large

1363
hifc/hifc_queue.h Normal file

File diff suppressed because it is too large

3076
hifc/hifc_service.c Normal file

File diff suppressed because it is too large

247
hifc/hifc_service.h Normal file

@ -0,0 +1,247 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_SERVICE_H__
#define __HIFC_SERVICE_H__
/* Send ElsCmnd or ElsRsp */
unsigned int hifc_send_els_cmnd(void *phba, struct unf_frame_pkg_s *v_pkg);
/* Send GsCmnd */
unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
/* Send BlsCmnd */
unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
/* Receive Frame from Root RQ */
unsigned int hifc_rcv_service_frame_from_rq(
struct hifc_hba_s *v_hba,
struct hifc_root_rq_info_s *rq_info,
struct hifc_root_rq_complet_info_s *v_complet_info,
unsigned short v_rcv_buf_num);
unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_info);
unsigned int hifc_rq_rcv_els_rsp_sts(
struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_info);
/* Receive Frame from SCQ */
unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe,
unsigned int scq_idx);
/* FC txmfs (transmit max frame size) */
#define HIFC_DEFAULT_TX_MAX_FREAM_SIZE 256
#define HIFC_FIRST_PKG_FLAG (1 << 0)
#define HIFC_LAST_PKG_FLAG (1 << 1)
#define HIFC_CHECK_IF_FIRST_PKG(pkg_flag) ((pkg_flag) & HIFC_FIRST_PKG_FLAG)
#define HIFC_CHECK_IF_LAST_PKG(pkg_flag) ((pkg_flag) & HIFC_LAST_PKG_FLAG)
#define HIFC_GET_SERVICE_TYPE(v_hba) 12
#define HIFC_GET_PACKET_TYPE(v_service_type) 1
#define HIFC_GET_PACKET_COS(v_service_type) 1
#define HIFC_GET_PRLI_PAYLOAD_LEN \
(UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE)
/* Start addr of the header/payload of the cmnd buffer in the pkg */
#define HIFC_FC_HEAD_LEN (sizeof(struct unf_fchead_s))
#define HIFC_PAYLOAD_OFFSET (sizeof(struct unf_fchead_s))
#define HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg) \
UNF_GET_FLOGI_PAYLOAD(v_pkg)
#define HIFC_GET_CMND_HEADER_ADDR(v_pkg) \
((v_pkg)->unf_cmnd_pload_bl.buffer_ptr)
#define HIFC_GET_RSP_HEADER_ADDR(v_pkg) \
((v_pkg)->unf_rsp_pload_bl.buffer_ptr)
#define HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg) \
((v_pkg)->unf_rsp_pload_bl.buffer_ptr + HIFC_PAYLOAD_OFFSET)
#define HIFC_GET_CMND_FC_HEADER(v_pkg) \
(&(UNF_GET_SFS_ENTRY(v_pkg)->sfs_common.frame_head))
#define HIFC_PKG_IS_ELS_RSP(els_cmnd_type) \
(((els_cmnd_type) == ELS_ACC) || ((els_cmnd_type) == ELS_RJT))
#define HIFC_XID_IS_VALID(xid, exi_base, exi_count) \
(((xid) >= (exi_base)) && ((xid) < ((exi_base) + (exi_count))))
#define UNF_FC_PAYLOAD_ELS_MASK 0xFF000000
#define UNF_FC_PAYLOAD_ELS_SHIFT 24
#define UNF_FC_PAYLOAD_ELS_DWORD 0
/* Note: this pfcpayload is little endian */
#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \
UNF_GET_SHIFTMASK(((unsigned int *)(void *)pfcpayload)\
[UNF_FC_PAYLOAD_ELS_DWORD], \
UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK)
#define HIFC_ELS_CMND_MASK 0xffff
#define HIFC_ELS_CMND__RELEVANT_SHIFT 16UL
#define HIFC_GET_ELS_CMND_CODE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
#define HIFC_GET_ELS_RSP_TYPE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
#define HIFC_GET_ELS_RSP_CODE(__cmnd) \
((unsigned short)((__cmnd) >> HIFC_ELS_CMND__RELEVANT_SHIFT & \
HIFC_ELS_CMND_MASK))
#define HIFC_GET_GS_CMND_CODE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
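/* Illustrative sketch (an assumption; pld and cmnd are placeholders):
 * decoding the ELS command byte from the first little-endian payload
 * dword, then splitting a pkg ELS command word into its response
 * type/code halves with the macros above.
 *
 *	u32 els = UNF_GET_FC_PAYLOAD_ELS_CMND(pld);	// top byte of dword 0
 *	u16 rsp_type = HIFC_GET_ELS_RSP_TYPE(cmnd);
 *	u16 rsp_code = HIFC_GET_ELS_RSP_CODE(cmnd);
 */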
/* ELS CMND Request */
#define ELS_CMND 0
/* fh_f_ctl - Frame control flags. */
#define HIFC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */
#define HIFC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */
#define HIFC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */
#define HIFC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */
#define HIFC_FC_END_SEQ (1 << 19) /* last frame of sequence */
#define HIFC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */
#define HIFC_FC_RES_B17 (1 << 17) /* reserved */
#define HIFC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */
#define HIFC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */
#define HIFC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */
#define HIFC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */
#define HIFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */
#define HIFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */
#define HIFC_FC_RES_B11 (1 << 11) /* reserved */
#define HIFC_FC_RES_B10 (1 << 10) /* reserved */
#define HIFC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */
#define HIFC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */
#define HIFC_FC_CONT_SEQ(i) ((i) << 6)
#define HIFC_FC_ABT_SEQ(i) ((i) << 4)
#define HIFC_FC_REL_OFF (1 << 3) /* parameter is relative offset */
#define HIFC_FC_RES2 (1 << 2) /* reserved */
#define HIFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */
#define HIFC_FCTL_REQ (HIFC_FC_FIRST_SEQ | HIFC_FC_END_SEQ |\
HIFC_FC_SEQ_INIT)
#define HIFC_FCTL_RESP (HIFC_FC_EX_CTX | HIFC_FC_LAST_SEQ | \
HIFC_FC_END_SEQ | HIFC_FC_SEQ_INIT)
#define HIFC_RCTL_BLS_REQ 0x81
#define HIFC_RCTL_BLS_ACC 0x84
#define HIFC_RCTL_BLS_RJT 0x85
#define UNF_IO_STATE_NEW 0
#define TGT_IO_STATE_SEND_XFERRDY (1 << 2)
#define TGT_IO_STATE_RSP (1 << 5)
#define TGT_IO_STATE_ABORT (1 << 7)
enum HIFC_FC_FH_TYPE_E {
HIFC_FC_TYPE_BLS = 0x00, /* basic link service */
HIFC_FC_TYPE_ELS = 0x01, /* extended link service */
HIFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */
HIFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */
HIFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
HIFC_FC_TYPE_ILS = 0x22 /* internal link service */
};
enum HIFC_FC_FH_RCTL_E {
HIFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */
HIFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */
HIFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */
HIFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */
HIFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */
HIFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */
HIFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */
HIFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */
#define HIFC_FC_RCTL_ILS_REQ HIFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */
#define HIFC_FC_RCTL_ILS_REP HIFC_FC_RCTL_DD_SOL_CTL /* ILS reply */
/*
* Extended Link_Data
*/
HIFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */
HIFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */
HIFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
HIFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */
/*
* Optional Extended Headers
*/
HIFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */
HIFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */
HIFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */
/*
* Basic Link Services fh_r_ctl values.
*/
HIFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */
HIFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */
HIFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */
HIFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */
HIFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */
HIFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */
/*
* Link Control Information.
*/
HIFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */
HIFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */
HIFC_FC_RCTL_P_RJT = 0xc2, /* port reject */
HIFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */
HIFC_FC_RCTL_P_BSY = 0xc4, /* port busy */
HIFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */
HIFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */
HIFC_FC_RCTL_LCR = 0xc7, /* link credit reset */
HIFC_FC_RCTL_END = 0xc9 /* end */
};
struct hifc_fc_frame_header {
unsigned char rctl; /* routing control */
unsigned char did[3]; /* Destination ID */
unsigned char cs_ctl; /* class of service control / pri */
unsigned char sid[3]; /* Source ID */
unsigned char type; /* see enum HIFC_FC_FH_TYPE_E above */
unsigned char frame_ctl[3]; /* frame control */
unsigned char seq_id; /* sequence ID */
unsigned char df_ctl; /* data field control */
unsigned short seq_cnt; /* sequence count */
unsigned short ox_id; /* originator exchange ID */
unsigned short rx_id; /* responder exchange ID */
unsigned int parm_offset; /* parameter or relative offset */
};
unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned char *v_pld,
unsigned int pld_len,
int first_frame);
unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int rx_id);
unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
void hifc_save_login_para_in_sq_info(
struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_coparms);
unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba,
struct hifcoe_aqe_data_s *v_aeg_msg);
#define HIFC_CHECK_PKG_ALLOCTIME(v_pkg) \
do { \
if (unlikely(UNF_GETXCHGALLOCTIME(v_pkg) == 0)) { \
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, \
UNF_WARN, \
"[warn]Invalid MagicNum,S_ID(0x%x) D_ID(0x%x) OXID(0x%x) RX_ID(0x%x) pkg type(0x%x) hot pooltag(0x%x)", \
UNF_GET_SID(v_pkg), \
UNF_GET_DID(v_pkg), \
UNF_GET_OXID(v_pkg), \
UNF_GET_RXID(v_pkg), \
((struct unf_frame_pkg_s *)v_pkg)->type, \
UNF_GET_XCHG_TAG(v_pkg)); \
} \
} while (0)
#endif /* __HIFC_SERVICE_H__ */

360
hifc/hifc_sml.c Normal file
View File

@ -0,0 +1,360 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/types.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_sml.h"
#ifndef HTONL
#define HTONL(x) \
((((x) & 0x000000ff) << 24) \
| (((x) & 0x0000ff00) << 8) \
| (((x) & 0x00ff0000) >> 8) \
| (((x) & 0xff000000) >> 24))
#endif
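/*
 * Usage sketch (illustrative only, not part of the driver): HTONL() swaps
 * byte order unconditionally, which matches network order on the
 * little-endian hosts this driver targets, e.g.:
 */
#if 0	/* example, never compiled */
static void hifc_sml_htonl_example(void)
{
	u32 raw = 0x11223344;

	WARN_ON(HTONL(raw) != 0x44332211);
}
#endif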
static void sml_ctr_htonl_n(u32 *node, u32 len)
{
u32 i;
for (i = 0; i < len; i++) {
*node = HTONL(*node);
node++;
}
}
static void hifc_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req_s *msg,
u8 instance_id, u8 op_id,
u8 ack, u32 ctr_id, u32 init_val)
{
msg->head.value = 0;
msg->head.bs.instance = instance_id;
msg->head.bs.op_id = op_id;
msg->head.bs.ack = ack;
msg->head.value = HTONL(msg->head.value);
msg->ctr_id = ctr_id;
msg->ctr_id = HTONL(msg->ctr_id);
msg->initial = init_val;
}
static void hifc_sml_ctr_write_build_req(struct chipif_sml_ctr_wr_req_s *msg,
u8 instance_id, u8 op_id,
u8 ack, u32 ctr_id,
u64 val1, u64 val2)
{
msg->head.value = 0;
msg->head.bs.instance = instance_id;
msg->head.bs.op_id = op_id;
msg->head.bs.ack = ack;
msg->head.value = HTONL(msg->head.value);
msg->ctr_id = ctr_id;
msg->ctr_id = HTONL(msg->ctr_id);
msg->value1_h = val1 >> 32;
msg->value1_l = val1 & 0xFFFFFFFF;
msg->value2_h = val2 >> 32;
msg->value2_l = val2 & 0xFFFFFFFF;
}
/**
* hifc_sm_ctr_rd32 - small single 32-bit counter read
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value: read counter value ptr
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value)
{
struct chipif_sml_ctr_rd_req_s req;
union ctr_rd_rsp_u rsp;
int ret;
if (!hwdev || !value)
return -EFAULT;
hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
CHIPIF_ACK, ctr_id, 0);
ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req),
(void *)&rsp, (unsigned short)sizeof(rsp));
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Sm 32bit counter read fail, err(%d)\n", ret);
return ret;
}
sml_ctr_htonl_n((u32 *)&rsp, 4);
*value = rsp.bs_ss32_rsp.value1;
return 0;
}
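/*
 * Usage sketch (illustrative only; the node/instance/counter ids below are
 * hypothetical, not taken from the firmware interface):
 */
#if 0	/* example, never compiled */
static int hifc_sml_rd32_example(void *hwdev)
{
	u32 val = 0;
	int err;

	/* read 32-bit counter 8 on node 0, instance 0 */
	err = hifc_sm_ctr_rd32(hwdev, 0, 0, 8, &val);
	if (err)
		return err;

	pr_info("counter 8 = %u\n", val);
	return 0;
}
#endif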
/**
* hifc_sm_ctr_rd32_clear - small single 32-bit counter read and clear to zero
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value: read counter value ptr
* Return: 0 - success, negative - failure (mapped from ACN error codes
* such as ERR_OK, ERR_PARAM, ERR_FAILED)
*/
int hifc_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
u32 ctr_id, u32 *value)
{
struct chipif_sml_ctr_rd_req_s req;
union ctr_rd_rsp_u rsp;
int ret;
if (!hwdev || !value)
return -EFAULT;
hifc_sml_ctr_read_build_req(&req, instance,
CHIPIF_SM_CTR_OP_READ_CLEAR,
CHIPIF_ACK, ctr_id, 0);
ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req),
(void *)&rsp, (unsigned short)sizeof(rsp));
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Sm 32bit counter clear fail, err(%d)\n", ret);
return ret;
}
sml_ctr_htonl_n((u32 *)&rsp, 4);
*value = rsp.bs_ss32_rsp.value1;
return 0;
}
/**
* hifc_sm_ctr_wr32 - small single 32-bit counter write
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value: write counter value
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value)
{
struct chipif_sml_ctr_wr_req_s req;
struct chipif_sml_ctr_wr_rsp_s rsp;
if (!hwdev)
return -EFAULT;
hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
CHIPIF_NOACK, ctr_id, (u64)value, 0ULL);
return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req), (void *)&rsp,
(unsigned short)sizeof(rsp));
}
/**
* hifc_sm_ctr_rd64 - big single 64-bit counter read
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value: read counter value ptr
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value)
{
struct chipif_sml_ctr_rd_req_s req;
union ctr_rd_rsp_u rsp;
int ret;
if (!hwdev || !value)
return -EFAULT;
hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
CHIPIF_ACK, ctr_id, 0);
ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req), (void *)&rsp,
(unsigned short)sizeof(rsp));
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Sm 64bit counter read fail err(%d)\n", ret);
return ret;
}
sml_ctr_htonl_n((u32 *)&rsp, 4);
*value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2;
return 0;
}
/**
* hifc_sm_ctr_wr64 - big single 64-bit counter write
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value: write counter value
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value)
{
struct chipif_sml_ctr_wr_req_s req;
struct chipif_sml_ctr_wr_rsp_s rsp;
if (!hwdev)
return -EFAULT;
hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
CHIPIF_NOACK, ctr_id, value, 0ULL);
return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req), (void *)&rsp,
(unsigned short)sizeof(rsp));
}
/**
* hifc_sm_ctr_rd64_pair - big pair 128-bit counter read
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value1: read counter value ptr
* @value2: read counter value ptr
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
u32 ctr_id, u64 *value1, u64 *value2)
{
struct chipif_sml_ctr_rd_req_s req;
union ctr_rd_rsp_u rsp;
int ret;
if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n",
hwdev, value1, value2, ctr_id);
return -EFAULT;
}
hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
CHIPIF_ACK, ctr_id, 0);
ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req), (void *)&rsp,
(unsigned short)sizeof(rsp));
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Sm 64 bit rd pair ret(%d)\n", ret);
return ret;
}
sml_ctr_htonl_n((u32 *)&rsp, 4);
*value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l;
*value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l;
return 0;
}
/**
* hifc_sm_ctr_wr64_pair - big pair 128-bit counter write
* @hwdev: the pointer to hw device
* @node: the node id
* @instance: instance value
* @ctr_id: counter id
* @value1: write counter value
* @value2: write counter value
* Return: 0 - success, negative - failure
*/
int hifc_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance,
u32 ctr_id, u64 value1, u64 value2)
{
struct chipif_sml_ctr_wr_req_s req;
struct chipif_sml_ctr_wr_rsp_s rsp;
/* in pair mode, ctr_id must be an even number */
if (!hwdev || (0 != (ctr_id & 0x1))) {
pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n",
ctr_id);
return -EFAULT;
}
hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE,
CHIPIF_NOACK, ctr_id, value1, value2);
return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req,
(unsigned short)sizeof(req), (void *)&rsp,
(unsigned short)sizeof(rsp));
}
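/*
 * Pair-counter usage sketch (illustrative only; ids and values are
 * hypothetical): a 128-bit pair occupies two consecutive counter slots,
 * so ctr_id must be even on both the read and the write path.
 */
#if 0	/* example, never compiled */
static int hifc_sml_pair_example(void *hwdev)
{
	u64 val1 = 0, val2 = 0;
	int err;

	/* counter id 10 is even, so it addresses a valid pair */
	err = hifc_sm_ctr_rd64_pair(hwdev, 0, 0, 10, &val1, &val2);
	if (err)
		return err;

	/* clear both halves of the pair in one write */
	return hifc_sm_ctr_wr64_pair(hwdev, 0, 0, 10, 0, 0);
}
#endif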
int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val)
{
struct hifc_csr_request_api_data api_data = {0};
u32 csr_val = 0;
u16 in_size = sizeof(api_data);
int ret;
if (!hwdev || !val)
return -EFAULT;
memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data));
api_data.dw0 = 0;
api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_READ_CSR;
api_data.dw1.bits.need_response = HIFC_CSR_NEED_RESP_DATA;
api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32;
api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
api_data.dw2.bits.csr_addr = addr;
api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
ret = hifc_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
in_size, &csr_val, 4);
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n",
dest, addr, ret);
return ret;
}
*val = csr_val;
return 0;
}
int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val)
{
struct hifc_csr_request_api_data api_data;
u16 in_size = sizeof(api_data);
int ret;
if (!hwdev)
return -EFAULT;
memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data));
api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_WRITE_CSR;
api_data.dw1.bits.need_response = HIFC_CSR_NO_RESP_DATA;
api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32;
api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
api_data.dw2.bits.csr_addr = addr;
api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
api_data.csr_write_data_h = 0xffffffff;
api_data.csr_write_data_l = val;
ret = hifc_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size);
if (ret) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Write 32 bit csr fail! dest %d addr 0x%x val 0x%x\n",
dest, addr, val);
return ret;
}
return 0;
}
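/*
 * Read-modify-write sketch over the CSR helpers (illustrative only; the
 * destination node, register address and bit are hypothetical):
 */
#if 0	/* example, never compiled */
static int hifc_csr_set_bit_example(void *hwdev)
{
	u32 val = 0;
	int err;

	err = hifc_api_csr_rd32(hwdev, 0, 0x1000, &val);
	if (err)
		return err;

	val |= BIT(0);
	return hifc_api_csr_wr32(hwdev, 0, 0x1000, val);
}
#endif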

182
hifc/hifc_sml.h Normal file
View File

@ -0,0 +1,182 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __CHIPIF_SML_COUNTER_H__
#define __CHIPIF_SML_COUNTER_H__
#define CHIPIF_FUNC_PF 0
#define CHIPIF_FUNC_VF 1
#define CHIPIF_FUNC_PPF 2
#define CHIPIF_ACK 1
#define CHIPIF_NOACK 0
#define CHIPIF_SM_CTR_OP_READ 0x2
#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6
#define CHIPIF_SM_CTR_OP_WRITE 0x3
#define SMALL_CNT_READ_RSP_SIZE 16
/* request head */
union chipif_sml_ctr_req_head_u {
struct {
u32 pad:15;
u32 ack:1;
u32 op_id:5;
u32 instance:6;
u32 src:5;
} bs;
u32 value;
};
/* counter read request struct */
struct chipif_sml_ctr_rd_req_s {
u32 extra;
union chipif_sml_ctr_req_head_u head;
u32 ctr_id;
u32 initial;
u32 pad;
};
/* counter read response union */
union ctr_rd_rsp_u {
struct {
u32 value1:16;
u32 pad0:16;
u32 pad1[3];
} bs_ss16_rsp;
struct {
u32 value1;
u32 pad[3];
} bs_ss32_rsp;
struct {
u32 value1:20;
u32 pad0:12;
u32 value2:12;
u32 pad1:20;
u32 pad2[2];
} bs_sp_rsp;
struct {
u32 value1;
u32 value2;
u32 pad[2];
} bs_bs64_rsp;
struct {
u32 val1_h;
u32 val1_l;
u32 val2_h;
u32 val2_l;
} bs_bp64_rsp;
};
/* response head */
union sml_ctr_rsp_head_u {
struct {
u32 pad:30; /* reserve */
u32 code:2; /* error code */
} bs;
u32 value;
};
/* counter write request struct */
struct chipif_sml_ctr_wr_req_s {
u32 extra;
union chipif_sml_ctr_req_head_u head;
u32 ctr_id;
u32 rsv1;
u32 rsv2;
u32 value1_h;
u32 value1_l;
u32 value2_h;
u32 value2_l;
};
/* counter write response struct */
struct chipif_sml_ctr_wr_rsp_s {
union sml_ctr_rsp_head_u head;
u32 pad[3];
};
enum HIFC_CSR_API_DATA_OPERATION_ID {
HIFC_CSR_OPERATION_WRITE_CSR = 0x1E,
HIFC_CSR_OPERATION_READ_CSR = 0x1F
};
enum HIFC_CSR_API_DATA_NEED_RESPONSE_DATA {
HIFC_CSR_NO_RESP_DATA = 0,
HIFC_CSR_NEED_RESP_DATA = 1
};
enum HIFC_CSR_API_DATA_DATA_SIZE {
HIFC_CSR_DATA_SZ_32 = 0,
HIFC_CSR_DATA_SZ_64 = 1
};
struct hifc_csr_request_api_data {
u32 dw0;
union {
struct {
u32 reserved1:13;
/* this field indicates the write/read data size:
* 2'b00: 32 bits
* 2'b01: 64 bits
* 2'b10~2'b11:reserved
*/
u32 data_size:2;
/* this field indicates that requestor expect receive a
* response data or not.
* 1'b0: expect not to receive a response data.
* 1'b1: expect to receive a response data.
*/
u32 need_response:1;
/* this field indicates the operation that the requestor
* expected.
* 5'b1_1110: write value to csr space.
* 5'b1_1111: read register from csr space.
*/
u32 operation_id:5;
u32 reserved2:6;
/* this field specifies the Src node ID for this API
* request message.
*/
u32 src_node_id:5;
} bits;
u32 val32;
} dw1;
union {
struct {
/* it specifies the CSR address. */
u32 csr_addr:26;
u32 reserved3:6;
} bits;
u32 val32;
} dw2;
/* if data_size=2'b01, it is high 32 bits of write data. else, it is
* 32'hFFFF_FFFF.
*/
u32 csr_write_data_h;
/* the low 32 bits of write data. */
u32 csr_write_data_l;
};
int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value);
int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value);
int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
u32 ctr_id, u64 *value1, u64 *value2);
#endif /* __CHIPIF_SML_COUNTER_H__ */

785
hifc/hifc_tool.c Normal file
View File

@ -0,0 +1,785 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/sock.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_dbgtool_knl.h"
#include "hifc_tool.h"
#include "hifc_portmng.h"
#define HIADM_DEV_PATH "/dev/hifc_dev"
#define HIADM_DEV_CLASS "hifc_class"
#define HIADM_DEV_NAME "hifc_dev"
#define MAJOR_DEV_NUM 921
#define HIFC_CMDQ_BUF_MAX_SIZE 2048U
#define MSG_MAX_IN_SIZE (2048 * 1024)
#define MSG_MAX_OUT_SIZE (2048 * 1024)
static dev_t g_dev_id = {0};
static struct class *g_nictool_class;
static struct cdev g_nictool_cdev;
static int g_nictool_init_flag;
static int g_nictool_ref_cnt;
static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in)
{
if (!buf_in)
return;
if (nt_msg->module == SEND_TO_UCODE)
hifc_free_cmd_buf(hwdev, buf_in);
else
kfree(buf_in);
}
static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
u32 in_size, void **buf_in)
{
void *msg_buf;
if (!in_size)
return 0;
if (nt_msg->module == SEND_TO_UCODE) {
struct hifc_cmd_buf *cmd_buf;
if (in_size > HIFC_CMDQ_BUF_MAX_SIZE) {
pr_err("Cmdq in size(%u) more than 2KB\n", in_size);
return -ENOMEM;
}
cmd_buf = hifc_alloc_cmd_buf(hwdev);
if (!cmd_buf) {
pr_err("Alloc cmdq cmd buffer failed in %s\n",
__func__);
return -ENOMEM;
}
msg_buf = cmd_buf->buf;
*buf_in = (void *)cmd_buf;
cmd_buf->size = (u16)in_size;
} else {
if (in_size > MSG_MAX_IN_SIZE) {
pr_err("In size(%u) more than 2M\n", in_size);
return -ENOMEM;
}
msg_buf = kzalloc(in_size, GFP_KERNEL);
*buf_in = msg_buf;
}
if (!(*buf_in)) {
pr_err("Alloc buffer in failed\n");
return -ENOMEM;
}
if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) {
pr_err("%s:%d: Copy from user failed\n",
__func__, __LINE__);
free_buff_in(hwdev, nt_msg, *buf_in);
return -EFAULT;
}
return 0;
}
static void free_buff_out(void *hwdev, struct msg_module *nt_msg,
void *buf_out)
{
if (!buf_out)
return;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm)
hifc_free_cmd_buf(hwdev, buf_out);
else
kfree(buf_out);
}
static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
u32 out_size, void **buf_out)
{
if (!out_size)
return 0;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm) {
struct hifc_cmd_buf *cmd_buf;
if (out_size > HIFC_CMDQ_BUF_MAX_SIZE) {
pr_err("Cmdq out size(%u) more than 2KB\n", out_size);
return -ENOMEM;
}
cmd_buf = hifc_alloc_cmd_buf(hwdev);
*buf_out = (void *)cmd_buf;
} else {
if (out_size > MSG_MAX_OUT_SIZE) {
pr_err("out size(%u) more than 2M\n", out_size);
return -ENOMEM;
}
*buf_out = kzalloc(out_size, GFP_KERNEL);
}
if (!(*buf_out)) {
pr_err("Alloc buffer out failed\n");
return -ENOMEM;
}
return 0;
}
static int copy_buf_out_to_user(struct msg_module *nt_msg,
u32 out_size, void *buf_out)
{
int ret = 0;
void *msg_out;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm)
msg_out = ((struct hifc_cmd_buf *)buf_out)->buf;
else
msg_out = buf_out;
if (copy_to_user(nt_msg->out_buf, msg_out, out_size))
ret = -EFAULT;
return ret;
}
static int __get_card_usr_api_chain_mem(int card_idx)
{
#define DBGTOOL_PAGE_ORDER 10
unsigned char *tmp;
int i;
mutex_lock(&g_hifc_addr_lock);
g_hifc_card_id = card_idx;
if (!g_hifc_card_vir_addr[card_idx]) {
g_hifc_card_vir_addr[card_idx] =
(void *)__get_free_pages(GFP_KERNEL,
DBGTOOL_PAGE_ORDER);
if (!g_hifc_card_vir_addr[card_idx]) {
pr_err("Alloc api chain memory fail for card %d.\n",
card_idx);
mutex_unlock(&g_hifc_addr_lock);
return -EFAULT;
}
memset(g_hifc_card_vir_addr[card_idx], 0,
PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
g_hifc_card_phy_addr[card_idx] =
virt_to_phys(g_hifc_card_vir_addr[card_idx]);
if (!g_hifc_card_phy_addr[card_idx]) {
pr_err("phy addr for card %d is 0.\n", card_idx);
free_pages((unsigned long)g_hifc_card_vir_addr[card_idx],
DBGTOOL_PAGE_ORDER);
g_hifc_card_vir_addr[card_idx] = NULL;
mutex_unlock(&g_hifc_addr_lock);
return -EFAULT;
}
tmp = g_hifc_card_vir_addr[card_idx];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
SetPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
}
mutex_unlock(&g_hifc_addr_lock);
return 0;
}
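/*
 * Note (illustrative): the pages above are marked reserved so that a later
 * mmap() of the api chain buffer (see hifc_mem_mmap wired into the fops
 * below) can remap them into user space; remap-style mappings expect
 * reserved pages that are excluded from normal page accounting.
 */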
static int get_card_func_info(char *dev_name, struct msg_module *nt_msg)
{
struct hifc_card_func_info card_func_info = {0};
int id, err;
if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) ||
nt_msg->len_info.in_buff_len != sizeof(card_func_info)) {
pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
nt_msg->len_info.out_buff_len,
nt_msg->len_info.in_buff_len,
sizeof(card_func_info));
return -EINVAL;
}
err = memcmp(dev_name, HIFC_CHIP_NAME, strlen(HIFC_CHIP_NAME));
if (err) {
pr_err("Invalid chip name %s\n", dev_name);
return err;
}
err = sscanf(dev_name, HIFC_CHIP_NAME "%d", &id);
/* sscanf returns the number of converted items, so a failed
 * conversion yields 0, not a negative value
 */
if (err != 1) {
pr_err("Failed to get hifc id\n");
return -EINVAL;
}
if (id >= MAX_CARD_NUM) {
pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1);
return -EINVAL;
}
hifc_get_card_func_info_by_card_name(dev_name, &card_func_info);
if (!card_func_info.num_pf) {
pr_err("None function found for %s\n", dev_name);
return -EFAULT;
}
err = __get_card_usr_api_chain_mem(id);
if (err) {
pr_err("Faile to get api chain memory for userspace %s\n",
dev_name);
return -EFAULT;
}
card_func_info.usr_api_phy_addr = g_hifc_card_phy_addr[id];
/* Copy the dev_info to user mode */
if (copy_to_user(nt_msg->out_buf, &card_func_info,
sizeof(card_func_info))) {
pr_err("Copy dev_info to user fail\n");
return -EFAULT;
}
return 0;
}
static bool is_mgmt_cmd_support(void *hwdev, unsigned int mod, u32 up_api_type)
{
if (FUNC_SUPPORT_MGMT(hwdev)) {
if (up_api_type == API_CLP) {
if (!hifc_is_hwdev_mod_inited
(hwdev, HIFC_HWDEV_CLP_INITED)) {
pr_err("CLP have not initialized\n");
return false;
}
} else if (!hifc_is_hwdev_mod_inited
(hwdev, HIFC_HWDEV_MGMT_INITED)) {
pr_err("MGMT have not initialized\n");
return false;
}
} else if (!hifc_is_hwdev_mod_inited
(hwdev, HIFC_HWDEV_MBOX_INITED)) {
pr_err("MBOX have not initialized\n");
return false;
}
return true;
}
static bool is_hwdev_cmd_support(unsigned int mod,
char *ifname, u32 up_api_type)
{
void *hwdev;
hwdev = hifc_get_hwdev_by_ifname(ifname);
if (!hwdev) {
pr_err("Can not get the device %s correctly\n", ifname);
return false;
}
switch (mod) {
case SEND_TO_UP:
case SEND_TO_SM:
return is_mgmt_cmd_support(hwdev, mod, up_api_type);
case SEND_TO_UCODE:
if (!hifc_is_hwdev_mod_inited(hwdev,
HIFC_HWDEV_CMDQ_INITED)) {
pr_err("CMDQ have not initialized\n");
return false;
}
break;
default:
return false;
}
return true;
}
static bool nictool_k_is_cmd_support(unsigned int mod,
char *ifname, u32 up_api_type)
{
enum hifc_init_state init_state =
hifc_get_init_state_by_ifname(ifname);
if (init_state == HIFC_INIT_STATE_NONE)
return false;
if (mod == HIFCADM_FC_DRIVER) {
if (init_state < HIFC_INIT_STATE_ALL_INITED) {
pr_err("HIFC driver have not initialized\n");
return false;
}
return true;
} else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) {
return is_hwdev_cmd_support(mod, ifname, up_api_type);
} else if (mod == SEND_TO_HW_DRIVER) {
if (init_state < HIFC_INIT_STATE_HWDEV_INITED) {
pr_err("Hwdev have not initialized\n");
return false;
}
return true;
}
return false;
}
static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
void **buf_in, u32 out_size, void **buf_out)
{
int ret;
ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in);
if (ret) {
pr_err("Alloc tool cmd buff in failed\n");
return ret;
}
ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out);
if (ret) {
pr_err("Alloc tool cmd buff out failed\n");
goto out_free_buf_in;
}
return 0;
out_free_buf_in:
free_buff_in(hwdev, nt_msg, *buf_in);
return ret;
}
static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
void *buf_in, void *buf_out)
{
free_buff_out(hwdev, nt_msg, buf_out);
free_buff_in(hwdev, nt_msg, buf_in);
}
static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
hifc_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
pr_err("Copy chip id to user failed\n");
return -EFAULT;
}
return 0;
}
static bool __is_pcidev_match_dev_name(const char *ifname,
struct hifc_pcidev *dev)
{
if (!strncmp(dev->uld_dev_name, ifname, IFNAMSIZ))
return true;
if ((dev->uld_dev) && (strlen(ifname) == 0))
return true;
return false;
}
struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname)
{
struct card_node *chip_node;
struct hifc_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hifc_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (__is_pcidev_match_dev_name(ifname, dev)) {
lld_dev_put();
return dev;
}
}
}
lld_dev_put();
return NULL;
}
static void *get_support_uld_dev(struct msg_module *nt_msg)
{
struct hifc_pcidev *dev;
dev = hifc_get_pcidev_by_dev_name(nt_msg->device_name);
if (dev)
return dev->uld_dev;
return NULL;
}
static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
enum hifc_service_type type;
int ret = 0;
type = nt_msg->module - SEND_TO_SM;
if (type != SERVICE_T_FC) {
pr_err("err cmd type: %d\n", type);
return ret;
}
*out_size = sizeof(struct drv_version_info);
ret = hifc_adm(NULL, nt_msg->msg_formate, buf_in, in_size,
buf_out, out_size);
if (ret)
return ret;
if (copy_to_user(nt_msg->out_buf, buf_out, *out_size))
return -EFAULT;
return ret;
}
static int send_to_service_driver(struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
enum hifc_service_type type;
void *uld_dev;
int ret = -EINVAL;
type = nt_msg->module - SEND_TO_SM;
if (type == SERVICE_T_FC) {
uld_dev = get_support_uld_dev(nt_msg);
if (!uld_dev)
return -EINVAL;
ret = hifc_adm(uld_dev,
nt_msg->msg_formate,
buf_in, in_size, buf_out,
out_size);
} else {
pr_err("Ioctl input module id: %d is incorrectly\n",
nt_msg->module);
}
return ret;
}
static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
int ret;
switch (nt_msg->module) {
case SEND_TO_HW_DRIVER:
ret = send_to_hw_driver(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_UP:
ret = send_to_up(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_UCODE:
ret = send_to_ucode(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_SM:
ret = send_to_sm(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
default:
ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out,
out_size);
break;
}
return ret;
}
static bool hifc_is_special_handling_cmd(struct msg_module *nt_msg, int *ret)
{
bool handled = true;
if (nt_msg->module != SEND_TO_HW_DRIVER)
return false;
switch (nt_msg->msg_formate) {
case GET_CHIP_ID:
*ret = get_all_chip_id_cmd(nt_msg);
break;
case GET_CHIP_INFO:
*ret = get_card_func_info(nt_msg->device_name, nt_msg);
break;
default:
handled = false;
break;
}
return handled;
}
static int do_nictool_ioctl_cmd(void *hwdev, struct msg_module *nt_msg)
{
void *buf_out = NULL;
void *buf_in = NULL;
u32 out_size_expect;
u32 out_size, in_size;
int ret = 0;
out_size_expect = nt_msg->len_info.out_buff_len;
in_size = nt_msg->len_info.in_buff_len;
ret = alloc_tmp_buf(hwdev, nt_msg, in_size,
&buf_in, out_size_expect, &buf_out);
if (ret) {
pr_err("Alloc tmp buff failed\n");
return ret;
}
out_size = out_size_expect;
if ((nt_msg->msg_formate == GET_DRV_VERSION) &&
(nt_msg->module == HIFCADM_FC_DRIVER)) {
ret = get_service_drv_version(hwdev, nt_msg, buf_in,
in_size, buf_out, &out_size);
goto out_free_buf;
}
ret = nictool_exec_cmd(hwdev, nt_msg, buf_in,
in_size, buf_out, &out_size);
if (ret) {
pr_err("nictool_exec_cmd failed, mod:%d msg_formate:%d\n",
nt_msg->module, nt_msg->msg_formate);
goto out_free_buf;
}
if (out_size_expect && buf_out) {
ret = copy_buf_out_to_user(nt_msg, out_size_expect, buf_out);
if (ret)
pr_err("Copy information to user failed\n");
}
out_free_buf:
free_tmp_buf(hwdev, nt_msg, buf_in, buf_out);
return ret;
}
static long nictool_k_unlocked_ioctl(struct file *pfile,
unsigned int cmd, unsigned long arg)
{
void *hwdev;
struct msg_module nt_msg;
int ret = 0;
memset(&nt_msg, 0, sizeof(nt_msg));
if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) {
pr_err("Copy information from user failed\n");
return -EFAULT;
}
/* end with '\0' */
nt_msg.device_name[IFNAMSIZ - 1] = '\0';
hifc_tool_cnt_inc();
if (hifc_is_special_handling_cmd(&nt_msg, &ret))
goto out_free_lock;
if (nt_msg.module == HIFCADM_FC_DRIVER &&
nt_msg.msg_formate == GET_CHIP_ID)
hifc_get_fc_devname(nt_msg.device_name);
if (!nictool_k_is_cmd_support(nt_msg.module, nt_msg.device_name,
nt_msg.up_cmd.up_db.up_api_type)) {
ret = -EFAULT;
goto out_free_lock;
}
/* get the hw device by interface name */
hwdev = hifc_get_hwdev_by_ifname(nt_msg.device_name);
if (!hwdev) {
pr_err("Can not get the device %s correctly\n",
nt_msg.device_name);
ret = -ENODEV;
goto out_free_lock;
}
ret = do_nictool_ioctl_cmd(hwdev, &nt_msg);
out_free_lock:
hifc_tool_cnt_dec();
return (long)ret;
}
static int nictool_k_open(struct inode *pnode, struct file *pfile)
{
return 0;
}
static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf,
size_t size, loff_t *ppos)
{
return 0;
}
static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf,
size_t size, loff_t *ppos)
{
return 0;
}
static const struct file_operations fifo_operations = {
.owner = THIS_MODULE,
.open = nictool_k_open,
.read = nictool_k_read,
.write = nictool_k_write,
.unlocked_ioctl = nictool_k_unlocked_ioctl,
.mmap = hifc_mem_mmap,
};
static int if_nictool_exist(void)
{
struct file *fp = NULL;
int exist = 0;
fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0);
if (IS_ERR(fp)) {
exist = 0;
} else {
(void)filp_close(fp, NULL);
exist = 1;
}
return exist;
}
/**
* hifc_tool_k_init - initialize the hw interface
*/
int hifc_tool_k_init(void)
{
int ret;
struct device *pdevice;
if (g_nictool_init_flag) {
g_nictool_ref_cnt++;
/* already initialized */
return 0;
}
if (if_nictool_exist()) {
pr_err("Nictool device exists\n");
return 0;
}
/* Device number: major number (12 bits) |
* minor number (20 bits)
*/
g_dev_id = MKDEV(MAJOR_DEV_NUM, 0);
/* Static device registration number */
ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME);
if (ret < 0) {
ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME);
if (ret < 0) {
pr_err("Register nictool_dev fail(0x%x)\n", ret);
return ret;
}
}
/* Create the device class */
/*lint -save -e160*/
g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS);
/*lint -restore*/
if (IS_ERR(g_nictool_class)) {
pr_err("Create nictool_class fail\n");
ret = -EFAULT;
goto class_create_err;
}
/* Initializing the character device */
cdev_init(&g_nictool_cdev, &fifo_operations);
/* Add the character device to the system */
ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
if (ret < 0) {
pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret);
goto cdev_add_err;
}
/* Export device information to user space
* (/sys/class/class name/device name)
*/
pdevice = device_create(g_nictool_class, NULL,
g_dev_id, NULL, HIADM_DEV_NAME);
if (IS_ERR(pdevice)) {
pr_err("Export nictool device information to user space fail\n");
ret = -EFAULT;
goto device_create_err;
}
g_nictool_init_flag = 1;
g_nictool_ref_cnt = 1;
pr_info("Register nictool_dev to system succeed\n");
return 0;
device_create_err:
cdev_del(&g_nictool_cdev);
cdev_add_err:
class_destroy(g_nictool_class);
class_create_err:
g_nictool_class = NULL;
unregister_chrdev_region(g_dev_id, 1);
return ret;
}
void hifc_tool_k_uninit(void)
{
if (g_nictool_init_flag) {
if (--g_nictool_ref_cnt)
return;
}
g_nictool_init_flag = 0;
if (!g_nictool_class || IS_ERR(g_nictool_class))
return;
cdev_del(&g_nictool_cdev);
device_destroy(g_nictool_class, g_dev_id);
class_destroy(g_nictool_class);
g_nictool_class = NULL;
unregister_chrdev_region(g_dev_id, 1);
pr_info("Unregister nictool_dev succeed\n");
}

331
hifc/hifc_tool.h Normal file
View File

@ -0,0 +1,331 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_NICTOOL_H_
#define HIFC_NICTOOL_H_
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
/* completion timeout interval, unit is jiffies */
#define UP_COMP_TIME_OUT_VAL 10000U
struct sm_in_st {
int node;
int id;
int instance;
};
struct sm_out_st {
u64 val1;
u64 val2;
};
struct up_log_msg_st {
u32 rd_len;
u32 addr;
};
struct csr_write_st {
u32 rd_len;
u32 addr;
u8 *data;
};
struct ipsurx_stats_info {
u32 addr;
u32 rd_cnt;
};
struct ucode_cmd_st {
union {
struct {
u32 comm_mod_type:8;
u32 ucode_cmd_type:4;
u32 cmdq_ack_type:3;
u32 ucode_imm:1;
u32 len:16;
} ucode_db;
u32 value;
};
};
struct up_cmd_st {
union {
struct {
u32 comm_mod_type:8;
u32 chipif_cmd:8;
u32 up_api_type:16;
} up_db;
u32 value;
};
};
struct _dcb_data {
u8 wr_flag;
u8 dcb_en;
u8 err;
u8 rsvd;
};
union _dcb_ctl {
struct _dcb_data dcb_data;
u32 data;
};
struct _pfc_data {
u8 pfc_en;
u8 pfc_priority;
u8 num_of_tc;
u8 err;
};
union _pfc {
struct _pfc_data pfc_data;
u32 data;
};
union _flag_com {
struct _ets_flag {
u8 flag_ets_enable:1;
u8 flag_ets_percent:1;
u8 flag_ets_cos:1;
u8 flag_ets_strict:1;
u8 rev:4;
} ets_flag;
u8 data;
};
struct _ets {
u8 ets_en;
u8 err;
u8 strict;
u8 tc[8];
u8 ets_percent[8];
union _flag_com flag_com;
};
#define API_CMD 0x1
#define API_CHAIN 0x2
#define API_CLP 0x3
struct msg_module {
char device_name[IFNAMSIZ];
unsigned int module;
union {
u32 msg_formate;
struct ucode_cmd_st ucode_cmd;
struct up_cmd_st up_cmd;
};
struct {
u32 in_buff_len;
u32 out_buff_len;
} len_info;
u32 res;
void *in_buff;
void *out_buf;
};
#define MAX_VER_INFO_LEN 128
struct drv_version_info {
char ver[MAX_VER_INFO_LEN];
};
struct chip_fault_stats {
int offset;
u8 chip_faults[MAX_DRV_BUF_SIZE];
};
struct hifc_wqe_info {
int q_id;
void *slq_handle;
unsigned int wqe_id;
};
struct hifc_tx_hw_page {
u64 phy_addr;
u64 *map_addr;
};
struct hifc_dbg_sq_info {
u16 q_id;
u16 pi;
u16 ci;/* sw_ci */
u16 fi;/* hw_ci */
u32 q_depth;
u16 pi_reverse;
u16 weqbb_size;
u8 priority;
u16 *ci_addr;
u64 cla_addr;
void *slq_handle;
struct hifc_tx_hw_page direct_wqe;
struct hifc_tx_hw_page db_addr;
u32 pg_idx;
u32 glb_sq_id;
};
struct hifc_dbg_rq_info {
u16 q_id;
u16 glb_rq_id;
u16 hw_pi;
u16 ci; /* sw_ci */
u16 sw_pi;
u16 wqebb_size;
u16 q_depth;
u16 buf_len;
void *slq_handle;
u64 ci_wqe_page_addr;
u64 ci_cla_tbl_addr;
u16 msix_idx;
u32 msix_vector;
};
#ifndef BUSINFO_LEN
#define BUSINFO_LEN (32)
#endif
struct pf_info {
char name[IFNAMSIZ];
char bus_info[BUSINFO_LEN];
u32 pf_type;
};
#ifndef MAX_SIZE
#define MAX_SIZE (16)
#endif
struct card_info {
struct pf_info pf[MAX_SIZE];
u32 pf_num;
};
struct nic_card_id {
u32 id[MAX_SIZE];
u32 num;
};
struct func_pdev_info {
u64 bar0_phy_addr;
u64 bar0_size;
u64 rsvd1[4];
};
struct hifc_card_func_info {
u32 num_pf;
u32 rsvd0;
u64 usr_api_phy_addr;
struct func_pdev_info pdev_info[MAX_SIZE];
};
#ifndef NIC_UP_CMD_UPDATE_FW
#define NIC_UP_CMD_UPDATE_FW (114)
#endif
#ifndef MAX_CARD_NUM
#define MAX_CARD_NUM (64)
#endif
extern void *g_hifc_card_node_array[MAX_CARD_NUM];
extern void *g_hifc_card_vir_addr[MAX_CARD_NUM];
extern u64 g_hifc_card_phy_addr[MAX_CARD_NUM];
extern struct mutex g_hifc_addr_lock;
extern int g_hifc_card_id;
struct hifc_nic_loop_mode {
u32 loop_mode;
u32 loop_ctrl;
};
struct hifc_nic_poll_weight {
int poll_weight;
};
enum hifc_homologues_state {
HIFC_HOMOLOGUES_OFF = 0,
HIFC_HOMOLOGUES_ON = 1,
};
struct hifc_homologues {
enum hifc_homologues_state homo_state;
};
struct hifc_pf_info {
u32 isvalid;
u32 pf_id;
};
enum module_name {
SEND_TO_NIC_DRIVER = 1,
SEND_TO_HW_DRIVER,
SEND_TO_UCODE,
SEND_TO_UP,
SEND_TO_SM,
HIFCADM_FC_DRIVER = 10,
};
enum driver_cmd_type {
FUNC_TYPE = 12,
GET_FUNC_IDX,
GET_DRV_VERSION = 16,
GET_HW_STATS = 18,
CLEAR_HW_STATS,
GET_CHIP_FAULT_STATS = 21,
GET_CHIP_ID = 25,
GET_SINGLE_CARD_INFO,
GET_FIRMWARE_ACTIVE_STATUS,
GET_DEVICE_ID = 29,
IS_DRV_IN_VM = 44,
GET_CHIP_INFO = 48,
GET_PF_ID = 52,
PORT_ID = 0x42
};
enum api_chain_cmd_type {
API_CSR_READ,
API_CSR_WRITE
};
enum sm_cmd_type {
SM_CTR_RD32 = 1,
SM_CTR_RD64_PAIR,
SM_CTR_RD64
};
int hifc_tool_k_init(void);
void hifc_tool_k_uninit(void);
int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size);
int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
void hifc_get_fc_devname(char *devname);
void *hifc_get_hwdev_by_ifname(char *ifname);
enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname);
void hifc_get_all_chip_id(void *id_info);
void hifc_tool_cnt_dec(void);
void hifc_tool_cnt_inc(void);
int hifc_get_device_id(void *hwdev, u16 *dev_id);
int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
bool hifc_is_valid_bar_addr(u64 offset);
void hifc_get_card_info(void *hwdev, void *bufin);
struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname);
void hifc_get_card_func_info_by_card_name(
const char *chip_name, struct hifc_card_func_info *card_func);
#endif /* HIFC_NICTOOL_H_ */

1010
hifc/hifc_tool_hw.c Normal file

File diff suppressed because it is too large

72
hifc/hifc_utils.c Normal file
View File

@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_utils.h"
#include "unf_log.h"
#include "unf_common.h"
void hifc_cpu_to_big64(void *v_addr, unsigned int size)
{
unsigned int index = 0;
unsigned int cnt = 0;
unsigned long long *temp = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_addr, dump_stack(); return);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
(size % HIFC_QWORD_BYTE) == 0, dump_stack(); return);
temp = (unsigned long long *)v_addr;
cnt = HIFC_SHIFT_TO_U64(size);
for (index = 0; index < cnt; index++) {
*temp = cpu_to_be64(*temp);
temp++;
}
}
void hifc_big_to_cpu64(void *v_addr, unsigned int size)
{
unsigned int index = 0;
unsigned int cnt = 0;
unsigned long long *tmp = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_addr, dump_stack(); return);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
(size % HIFC_QWORD_BYTE) == 0, dump_stack(); return);
tmp = (unsigned long long *)v_addr;
cnt = HIFC_SHIFT_TO_U64(size);
for (index = 0; index < cnt; index++) {
*tmp = be64_to_cpu(*tmp);
tmp++;
}
}
void hifc_cpu_to_big32(void *v_addr, unsigned int size)
{
unf_cpu_to_big_end(v_addr, size);
}
void hifc_big_to_cpu32(void *v_addr, unsigned int size)
{
if (size % UNF_BYTES_OF_DWORD)
dump_stack();
unf_big_end_to_cpu(v_addr, size);
}
unsigned int hifc_log2n(unsigned int val)
{
unsigned int result = 0;
unsigned int logn = (val >> 1);
while (logn) {
logn >>= 1;
result++;
}
return result;
}
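/*
 * Behavioural note (illustrative): hifc_log2n() returns floor(log2(val)),
 * matching the kernel's ilog2() for power-of-two inputs:
 */
#if 0	/* example, never compiled */
static void hifc_log2n_example(void)
{
	WARN_ON(hifc_log2n(1) != 0);
	WARN_ON(hifc_log2n(16) != 4);
	WARN_ON(hifc_log2n(17) != 4);	/* floor, not ceiling */
}
#endif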

360
hifc/hifc_utils.h Normal file
View File

@ -0,0 +1,360 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_UTILS_H__
#define __HIFC_UTILS_H__
#define UNF_ZERO 0
#define HIFC_BIT(n) (0x1UL << (n))
#define HIFC_BIT_0 HIFC_BIT(0)
#define HIFC_BIT_1 HIFC_BIT(1)
#define HIFC_BIT_2 HIFC_BIT(2)
#define HIFC_BIT_3 HIFC_BIT(3)
#define HIFC_BIT_4 HIFC_BIT(4)
#define HIFC_BIT_5 HIFC_BIT(5)
#define HIFC_BIT_6 HIFC_BIT(6)
#define HIFC_BIT_7 HIFC_BIT(7)
#define HIFC_BIT_8 HIFC_BIT(8)
#define HIFC_BIT_9 HIFC_BIT(9)
#define HIFC_BIT_10 HIFC_BIT(10)
#define HIFC_BIT_11 HIFC_BIT(11)
#define HIFC_BIT_12 HIFC_BIT(12)
#define HIFC_BIT_13 HIFC_BIT(13)
#define HIFC_BIT_14 HIFC_BIT(14)
#define HIFC_BIT_15 HIFC_BIT(15)
#define HIFC_BIT_16 HIFC_BIT(16)
#define HIFC_BIT_17 HIFC_BIT(17)
#define HIFC_BIT_18 HIFC_BIT(18)
#define HIFC_BIT_19 HIFC_BIT(19)
#define HIFC_BIT_20 HIFC_BIT(20)
#define HIFC_BIT_21 HIFC_BIT(21)
#define HIFC_BIT_22 HIFC_BIT(22)
#define HIFC_BIT_23 HIFC_BIT(23)
#define HIFC_BIT_24 HIFC_BIT(24)
#define HIFC_BIT_25 HIFC_BIT(25)
#define HIFC_BIT_26 HIFC_BIT(26)
#define HIFC_BIT_27 HIFC_BIT(27)
#define HIFC_BIT_28 HIFC_BIT(28)
#define HIFC_BIT_29 HIFC_BIT(29)
#define HIFC_BIT_30 HIFC_BIT(30)
#define HIFC_BIT_31 HIFC_BIT(31)
#define HIFC_GET_BITS(data, mask) ((data) & (mask)) /* get the bits */
#define HIFC_SET_BITS(data, mask) ((data) |= (mask)) /* set the bits */
#define HIFC_CLR_BITS(data, mask) ((data) &= ~(mask)) /* clear the bits */
/* Byte alignment */
#define HIFC_ALIGN_N(n) __attribute__((__packed__, __aligned__(n)))
#define HIFC_ALIGN_1 HIFC_ALIGN_N(1)
#define HIFC_ALIGN_2 HIFC_ALIGN_N(2)
#define HIFC_ALIGN_4 HIFC_ALIGN_N(4)
#define HIFC_ALIGN_8 HIFC_ALIGN_N(8)
#define HIFC_ADJUST_ALIGN_4(n) ((n) - (n) % 4)
#define HIFC_LSB(x) ((unsigned char)(x))
#define HIFC_MSB(x) ((unsigned char)((unsigned short)(x) >> 8))
#define HIFC_LSW(x) ((unsigned short)(x))
#define HIFC_MSW(x) ((unsigned short)((unsigned int)(x) >> 16))
#define HIFC_LSD(x) ((unsigned int)((unsigned long long)(x)))
#define HIFC_MSD(x) ((unsigned int)((((unsigned long long)(x)) >> 16) >> 16))
#define HIFC_BYTES_TO_QW_NUM(x) ((x) >> 3)
#define HIFC_BYTES_TO_DW_NUM(x) ((x) >> 2)
#define UNF_GET_SHIFTMASK(__src, __shift, __mask) \
(((__src) & (__mask)) >> (__shift))
#define UNF_FC_SET_SHIFTMASK(__des, __val, __shift, __mask)\
((__des) = \
(((__des) & ~(__mask)) | (((__val) << (__shift)) & (__mask))))
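/*
 * Worked example (illustrative): with __mask = 0x00FF0000 and
 * __shift = 16, UNF_GET_SHIFTMASK(0x12345678, 16, 0x00FF0000) yields
 * 0x34, and UNF_FC_SET_SHIFTMASK(dst, 0xAB, 16, 0x00FF0000) turns a
 * dst of 0x12345678 into 0x12AB5678.
 */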
/* D_ID */
#define UNF_FC_HEADER_DID_MASK 0x00FFFFFF
#define UNF_FC_HEADER_DID_SHIFT 0
#define UNF_FC_HEADER_DID_DWORD 0
#define UNF_GET_FC_HEADER_DID(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\
UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK)
#define UNF_SET_FC_HEADER_DID(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\
__val, UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK)
/* R_CTL */
#define UNF_FC_HEADER_RCTL_MASK 0xFF000000
#define UNF_FC_HEADER_RCTL_SHIFT 24
#define UNF_FC_HEADER_RCTL_DWORD 0
#define UNF_GET_FC_HEADER_RCTL(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\
UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK)
#define UNF_SET_FC_HEADER_RCTL(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\
__val, UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK)
/* S_ID */
#define UNF_FC_HEADER_SID_MASK 0x00FFFFFF
#define UNF_FC_HEADER_SID_SHIFT 0
#define UNF_FC_HEADER_SID_DWORD 1
#define UNF_GET_FC_HEADER_SID(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\
UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK)
#define UNF_SET_FC_HEADER_SID(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\
__val, UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK)
/* CS_CTL */
#define UNF_FC_HEADER_CS_CTL_MASK 0xFF000000
#define UNF_FC_HEADER_CS_CTL_SHIFT 24
#define UNF_FC_HEADER_CS_CTL_DWORD 1
#define UNF_GET_FC_HEADER_CS_CTL(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\
UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK)
#define UNF_SET_FC_HEADER_CS_CTL(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\
__val, UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK)
/* F_CTL */
#define UNF_FC_HEADER_FCTL_MASK 0x00FFFFFF
#define UNF_FC_HEADER_FCTL_SHIFT 0
#define UNF_FC_HEADER_FCTL_DWORD 2
#define UNF_GET_FC_HEADER_FCTL(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\
UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK)
#define UNF_SET_FC_HEADER_FCTL(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\
__val, UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK)
/* TYPE */
#define UNF_FC_HEADER_TYPE_MASK 0xFF000000
#define UNF_FC_HEADER_TYPE_SHIFT 24
#define UNF_FC_HEADER_TYPE_DWORD 2
#define UNF_GET_FC_HEADER_TYPE(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\
UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK)
#define UNF_SET_FC_HEADER_TYPE(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\
__val, UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK)
/* SEQ_CNT */
#define UNF_FC_HEADER_SEQ_CNT_MASK 0x0000FFFF
#define UNF_FC_HEADER_SEQ_CNT_SHIFT 0
#define UNF_FC_HEADER_SEQ_CNT_DWORD 3
#define UNF_GET_FC_HEADER_SEQ_CNT(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\
UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK)
#define UNF_SET_FC_HEADER_SEQ_CNT(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\
__val, UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK)
/* DF_CTL */
#define UNF_FC_HEADER_DF_CTL_MASK 0x00FF0000
#define UNF_FC_HEADER_DF_CTL_SHIFT 16
#define UNF_FC_HEADER_DF_CTL_DWORD 3
#define UNF_GET_FC_HEADER_DF_CTL(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\
UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK)
#define UNF_SET_FC_HEADER_DF_CTL(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\
__val, UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK)
/* SEQ_ID */
#define UNF_FC_HEADER_SEQ_ID_MASK 0xFF000000
#define UNF_FC_HEADER_SEQ_ID_SHIFT 24
#define UNF_FC_HEADER_SEQ_ID_DWORD 3
#define UNF_GET_FC_HEADER_SEQ_ID(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\
UNF_FC_HEADER_SEQ_ID_SHIFT, UNF_FC_HEADER_SEQ_ID_MASK)
#define UNF_SET_FC_HEADER_SEQ_ID(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\
__val, UNF_FC_HEADER_SEQ_ID_SHIFT, UNF_FC_HEADER_SEQ_ID_MASK)
/* RX_ID */
#define UNF_FC_HEADER_RXID_MASK 0x0000FFFF
#define UNF_FC_HEADER_RXID_SHIFT 0
#define UNF_FC_HEADER_RXID_DWORD 4
#define UNF_GET_FC_HEADER_RXID(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\
UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK)
#define UNF_SET_FC_HEADER_RXID(__pfcheader, __val)\
UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\
__val, UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK)
/* OX_ID */
#define UNF_FC_HEADER_OXID_MASK 0xFFFF0000
#define UNF_FC_HEADER_OXID_SHIFT 16
#define UNF_FC_HEADER_OXID_DWORD 4
#define UNF_GET_FC_HEADER_OXID(__pfcheader)\
((unsigned short)UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\
UNF_FC_HEADER_OXID_SHIFT\
, UNF_FC_HEADER_OXID_MASK))
#define UNF_SET_FC_HEADER_OXID(__pfcheader, __val)\
(UNF_FC_SET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\
__val, UNF_FC_HEADER_OXID_SHIFT, UNF_FC_HEADER_OXID_MASK))
/* PRLI PARAM 3 */
#define HIFC_PRLI_PARAM_WXFER_ENABLE_MASK 0x00000001
#define HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT 0
#define HIFC_PRLI_PARAM_WXFER_DWORD 3
#define HIFC_GET_PRLI_PARAM_WXFER(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_WXFER_DWORD],\
HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT, HIFC_PRLI_PARAM_WXFER_ENABLE_MASK)
#define HIFC_PRLI_PARAM_CONF_ENABLE_MASK 0x00000080
#define HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT 7
#define HIFC_PRLI_PARAM_CONF_DWORD 3
#define HIFC_GET_PRLI_PARAM_CONF(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_DWORD],\
HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT, HIFC_PRLI_PARAM_CONF_ENABLE_MASK)
#define HIFC_PRLI_PARAM_REC_ENABLE_MASK 0x00000400
#define HIFC_PRLI_PARAM_REC_ENABLE_SHIFT 10
#define HIFC_PRLI_PARAM_CONF_REC 3
#define HIFC_GET_PRLI_PARAM_REC(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_REC],\
HIFC_PRLI_PARAM_REC_ENABLE_SHIFT, HIFC_PRLI_PARAM_REC_ENABLE_MASK)
#define HIFC_WQE_TYPE_MASK 0x000000FF
#define HIFC_WQE_TYPE_SHIFT 0
#define HIFC_WQE_TYPE_DWORD 0
#define HIFC_GET_WQE_TYPE_BE(__pfcheader)\
UNF_GET_SHIFTMASK(\
((unsigned int *)(void *)__pfcheader)[HIFC_WQE_TYPE_DWORD],\
HIFC_WQE_TYPE_SHIFT, HIFC_WQE_TYPE_MASK)
#define HIFC_MAKE_64BIT_ADDR(__high32, __low32) \
(unsigned long long)(((unsigned long long)(__high32) << 32) |\
(unsigned long long)(__low32))
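/*
 * Worked example (illustrative):
 * HIFC_MAKE_64BIT_ADDR(0x00000001, 0x80000000) == 0x0000000180000000ULL,
 * i.e. how a DMA address split across two 32-bit WQE fields is rebuilt.
 */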
#define HIFC_TRACE(log_id, log_att, log_level, fmt, ...) \
UNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__)
/* Valid check */
#define HIFC_CHECK(log_id, condition, fail_do) \
do { \
if (unlikely(!(condition))) { \
HIFC_TRACE((log_id), UNF_LOG_IO_ATT, UNF_ERR, \
"[err]Function:%s parameter check[%s] invalid",\
__func__, #condition); \
fail_do; \
} \
} while (0)
#define PRINT_IN_MBOX(dbg_level, data, count) \
do { \
unsigned int index = 0; \
if ((dbg_level) <= unf_dbg_level) { \
printk("HIFC send inbound mailbox: "); \
for (index = 0; index < (count) / 4; index++) { \
printk("%08x ", \
(((unsigned int *)(data))[index]));\
} \
printk("\n"); \
} \
} while (0)
#define PRINT_OUT_MBOX(dbg_level, data, count) \
do { \
unsigned int index = 0; \
if ((dbg_level) <= unf_dbg_level) { \
printk("HIFC receive outbound mailbox: "); \
for (index = 0; index < (count) / 4; index++) { \
printk("%08x ",\
(((unsigned int *)(data))[index]));\
} \
printk("\n"); \
} \
} while (0)
#define PRINT_INBOUND_IOB(dbg_level, data, count) \
do { \
unsigned int index = 0; \
if ((dbg_level) <= unf_dbg_level) { \
printk("HIFC send inbound iob: "); \
for (index = 0; index < (count) / 4; index++) { \
printk("%08x ",\
(((unsigned int *)(data))[index]));\
} \
printk("\n"); \
} \
} while (0)
#define PRINT_OUTBOUND_IOB(dbg_level, data, count) \
do { \
unsigned int index = 0; \
if ((dbg_level) <= unf_dbg_level) { \
printk("HIFC receive outbound iob: "); \
for (index = 0; index < (count) / 4; index++) { \
printk("%08x ",\
(((unsigned int *)(data))[index]));\
} \
printk("\n"); \
} \
} while (0)
#define HIFC_REFERNCE_VAR(ref, cmp, ret)
#define RETURN_ERROR_S32 (-1)
#define UNF_RETURN_ERROR_S32 (-1)
enum HIFC_HBA_ERR_STAT_E {
HIFC_STAT_CTXT_FLUSH_DONE = 0,
HIFC_STAT_SQ_WAIT_EMPTY,
HIFC_STAT_LAST_GS_SCQE,
HIFC_STAT_SQ_POOL_EMPTY,
HIFC_STAT_PARENT_IO_FLUSHED,
HIFC_STAT_ROOT_IO_FLUSHED, /* 5 */
HIFC_STAT_ROOT_SQ_FULL,
HIFC_STAT_ELS_RSP_EXCH_REUSE,
HIFC_STAT_GS_RSP_EXCH_REUSE,
HIFC_STAT_SQ_IO_BUFFER_CLEARED,
HIFC_STAT_PARENT_SQ_NOT_OFFLOADED, /* 10 */
HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK,
HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID,
HIFC_HBA_STAT_BUTT
};
#define HIFC_DWORD_BYTE 4
#define HIFC_QWORD_BYTE 8
#define HIFC_SHIFT_TO_U64(x) ((x) >> 3)
#define HIFC_SHIFT_TO_U32(x) ((x) >> 2)
void hifc_cpu_to_big64(void *v_addr, unsigned int size);
void hifc_big_to_cpu64(void *v_addr, unsigned int size);
void hifc_cpu_to_big32(void *v_addr, unsigned int size);
void hifc_big_to_cpu32(void *v_addr, unsigned int size);
unsigned int hifc_log2n(unsigned int val);
#endif /* __HIFC_UTILS_H__ */

624
hifc/hifc_wq.c Normal file
View File

@ -0,0 +1,624 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_wq.h"
#define WQS_MAX_NUM_BLOCKS 128
#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
sizeof((wqs)->free_blocks[0]))
static void wqs_return_block(struct hifc_wqs *wqs, u32 page_idx, u32 block_idx)
{
u32 pos;
spin_lock(&wqs->alloc_blocks_lock);
wqs->num_free_blks++;
pos = wqs->return_blk_pos++;
pos &= WQS_MAX_NUM_BLOCKS - 1;
wqs->free_blocks[pos].page_idx = page_idx;
wqs->free_blocks[pos].block_idx = block_idx;
spin_unlock(&wqs->alloc_blocks_lock);
}
static int wqs_next_block(struct hifc_wqs *wqs, u32 *page_idx,
u32 *block_idx)
{
u32 pos;
spin_lock(&wqs->alloc_blocks_lock);
if (wqs->num_free_blks <= 0) {
spin_unlock(&wqs->alloc_blocks_lock);
return -ENOMEM;
}
wqs->num_free_blks--;
pos = wqs->alloc_blk_pos++;
pos &= WQS_MAX_NUM_BLOCKS - 1;
*page_idx = wqs->free_blocks[pos].page_idx;
*block_idx = wqs->free_blocks[pos].block_idx;
wqs->free_blocks[pos].page_idx = 0xFFFFFFFF;
wqs->free_blocks[pos].block_idx = 0xFFFFFFFF;
spin_unlock(&wqs->alloc_blocks_lock);
return 0;
}
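/*
 * Note on the ring indices above (illustrative): alloc_blk_pos and
 * return_blk_pos grow monotonically and are reduced modulo
 * WQS_MAX_NUM_BLOCKS by the mask, which is only valid because
 * WQS_MAX_NUM_BLOCKS (128) is a power of two; e.g. a raw position of
 * 130 maps to slot 130 & 127 == 2.
 */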
static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr,
u64 **shadow_vaddr, u64 page_sz)
{
dma_addr_t dma_addr = 0;
*vaddr = dma_alloc_coherent(handle, page_sz, &dma_addr,
GFP_KERNEL);
if (!*vaddr) {
sdk_err(handle, "Failed to allocate dma to wqs page\n");
return -ENOMEM;
}
if (!ADDR_4K_ALIGNED(dma_addr)) {
sdk_err(handle, "Cla is not 4k aligned!\n");
goto shadow_vaddr_err;
}
*paddr = (u64)dma_addr;
/* use vzalloc for big mem, shadow_vaddr only used at initialization */
*shadow_vaddr = vzalloc(page_sz);
if (!*shadow_vaddr) {
sdk_err(handle, "Failed to allocate shadow page vaddr\n");
goto shadow_vaddr_err;
}
return 0;
shadow_vaddr_err:
dma_free_coherent(handle, page_sz, *vaddr, dma_addr);
return -ENOMEM;
}
static int wqs_allocate_page(struct hifc_wqs *wqs, u32 page_idx)
{
return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx],
&wqs->page_paddr[page_idx],
&wqs->shadow_page_vaddr[page_idx],
WQS_PAGE_SIZE);
}
static void wqs_free_page(struct hifc_wqs *wqs, u32 page_idx)
{
dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE,
wqs->page_vaddr[page_idx],
(dma_addr_t)wqs->page_paddr[page_idx]);
vfree(wqs->shadow_page_vaddr[page_idx]);
}
static int cmdq_allocate_page(struct hifc_cmdq_pages *cmdq_pages)
{
return queue_alloc_page(cmdq_pages->dev_hdl,
&cmdq_pages->cmdq_page_vaddr,
&cmdq_pages->cmdq_page_paddr,
&cmdq_pages->cmdq_shadow_page_vaddr,
CMDQ_PAGE_SIZE);
}
static void cmdq_free_page(struct hifc_cmdq_pages *cmdq_pages)
{
dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE,
cmdq_pages->cmdq_page_vaddr,
(dma_addr_t)cmdq_pages->cmdq_page_paddr);
vfree(cmdq_pages->cmdq_shadow_page_vaddr);
}
static int alloc_wqes_shadow(struct hifc_wq *wq)
{
u64 size;
/* if wq->max_wqe_size == 0, we don't need to alloc shadow */
if (wq->max_wqe_size <= wq->wqebb_size)
return 0;
size = (u64)wq->num_q_pages * wq->max_wqe_size;
wq->shadow_wqe = kzalloc(size, GFP_KERNEL);
if (!wq->shadow_wqe) {
pr_err("Failed to allocate shadow wqe\n");
return -ENOMEM;
}
size = wq->num_q_pages * sizeof(wq->prod_idx);
wq->shadow_idx = kzalloc(size, GFP_KERNEL);
if (!wq->shadow_idx) {
pr_err("Failed to allocate shadow index\n");
goto shadow_idx_err;
}
return 0;
shadow_idx_err:
kfree(wq->shadow_wqe);
return -ENOMEM;
}
static void free_wqes_shadow(struct hifc_wq *wq)
{
if (wq->max_wqe_size <= wq->wqebb_size)
return;
kfree(wq->shadow_idx);
kfree(wq->shadow_wqe);
}
static void free_wq_pages(void *handle, struct hifc_wq *wq,
u32 num_q_pages)
{
u32 i;
for (i = 0; i < num_q_pages; i++)
hifc_dma_free_coherent_align(handle, &wq->mem_align[i]);
free_wqes_shadow(wq);
wq->block_vaddr = NULL;
wq->shadow_block_vaddr = NULL;
kfree(wq->mem_align);
}
static int alloc_wq_pages(void *dev_hdl, struct hifc_wq *wq)
{
struct hifc_dma_addr_align *mem_align;
u64 *vaddr, *paddr;
u32 i, num_q_pages;
int err;
vaddr = wq->shadow_block_vaddr;
paddr = wq->block_vaddr;
num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
if (num_q_pages > WQ_MAX_PAGES) {
sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n",
num_q_pages);
return -EINVAL;
}
if (num_q_pages & (num_q_pages - 1)) {
sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n",
num_q_pages);
return -EINVAL;
}
wq->num_q_pages = num_q_pages;
err = alloc_wqes_shadow(wq);
if (err) {
sdk_err(dev_hdl, "Failed to allocate wqe shadow\n");
return err;
}
wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align),
GFP_KERNEL);
if (!wq->mem_align) {
sdk_err(dev_hdl, "Failed to allocate mem_align\n");
free_wqes_shadow(wq);
return -ENOMEM;
}
for (i = 0; i < num_q_pages; i++) {
mem_align = &wq->mem_align[i];
err = hifc_dma_alloc_coherent_align(dev_hdl, wq->wq_page_size,
wq->wq_page_size,
GFP_KERNEL, mem_align);
if (err) {
sdk_err(dev_hdl, "Failed to allocate wq page\n");
goto alloc_wq_pages_err;
}
*paddr = cpu_to_be64(mem_align->align_paddr);
*vaddr = (u64)mem_align->align_vaddr;
paddr++;
vaddr++;
}
return 0;
alloc_wq_pages_err:
free_wq_pages(dev_hdl, wq, i);
return -ENOMEM;
}
int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq,
u32 wqebb_size, u32 wq_page_size, u16 q_depth,
u32 max_wqe_size)
{
u32 num_wqebbs_per_page;
int err;
if (wqebb_size == 0) {
sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n");
return -EINVAL;
}
if (q_depth & (q_depth - 1)) {
sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n",
q_depth);
return -EINVAL;
}
if (wq_page_size & (wq_page_size - 1)) {
sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n",
wq_page_size);
return -EINVAL;
}
num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n",
num_wqebbs_per_page);
return -EINVAL;
}
err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
if (err) {
sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n");
return err;
}
wq->wqebb_size = wqebb_size;
wq->wq_page_size = wq_page_size;
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page);
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
err = alloc_wq_pages(wqs->dev_hdl, wq);
if (err) {
sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n");
goto alloc_wq_pages_err;
}
atomic_set(&wq->delta, q_depth);
wq->cons_idx = 0;
wq->prod_idx = 0;
wq->mask = q_depth - 1;
return 0;
alloc_wq_pages_err:
wqs_return_block(wqs, wq->page_idx, wq->block_idx);
return err;
}
void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq)
{
free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages);
wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}
static void init_wqs_blocks_arr(struct hifc_wqs *wqs)
{
u32 page_idx, blk_idx, pos = 0;
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
wqs->free_blocks[pos].page_idx = page_idx;
wqs->free_blocks[pos].block_idx = blk_idx;
pos++;
}
}
wqs->alloc_blk_pos = 0;
wqs->return_blk_pos = 0;
wqs->num_free_blks = WQS_MAX_NUM_BLOCKS;
spin_lock_init(&wqs->alloc_blocks_lock);
}
void hifc_wq_wqe_pg_clear(struct hifc_wq *wq)
{
u64 *block_vaddr;
u32 pg_idx;
block_vaddr = wq->shadow_block_vaddr;
atomic_set(&wq->delta, wq->q_depth);
wq->cons_idx = 0;
wq->prod_idx = 0;
for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++)
memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size);
}
int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, void *dev_hdl,
int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
u16 q_depth, u32 max_wqe_size)
{
int i, j, err = -ENOMEM;
if (q_depth & (q_depth - 1)) {
sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n",
q_depth);
return -EINVAL;
}
cmdq_pages->dev_hdl = dev_hdl;
err = cmdq_allocate_page(cmdq_pages);
if (err) {
sdk_err(dev_hdl, "Failed to allocate CMDQ page\n");
return err;
}
for (i = 0; i < cmdq_blocks; i++) {
wq[i].page_idx = 0;
wq[i].block_idx = (u32)i;
wq[i].wqebb_size = wqebb_size;
wq[i].wq_page_size = wq_page_size;
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page =
ALIGN(wq_page_size, wqebb_size) / wqebb_size;
wq[i].wqebbs_per_page_shift =
(u32)ilog2(wq[i].num_wqebbs_per_page);
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]);
if (err) {
sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n");
goto cmdq_block_err;
}
atomic_set(&wq[i].delta, q_depth);
wq[i].cons_idx = 0;
wq[i].prod_idx = 0;
wq[i].mask = q_depth - 1;
}
return 0;
cmdq_block_err:
for (j = 0; j < i; j++)
free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages);
cmdq_free_page(cmdq_pages);
return err;
}
void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, int cmdq_blocks)
{
int i;
for (i = 0; i < cmdq_blocks; i++)
free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages);
cmdq_free_page(cmdq_pages);
}
static int alloc_page_addr(struct hifc_wqs *wqs)
{
u64 size = wqs->num_pages * sizeof(*wqs->page_paddr);
wqs->page_paddr = kzalloc(size, GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
size = wqs->num_pages * sizeof(*wqs->page_vaddr);
wqs->page_vaddr = kzalloc(size, GFP_KERNEL);
if (!wqs->page_vaddr)
goto page_vaddr_err;
size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto page_shadow_vaddr_err;
return 0;
page_shadow_vaddr_err:
kfree(wqs->page_vaddr);
page_vaddr_err:
kfree(wqs->page_paddr);
return -ENOMEM;
}
static void free_page_addr(struct hifc_wqs *wqs)
{
kfree(wqs->shadow_page_vaddr);
kfree(wqs->page_vaddr);
kfree(wqs->page_paddr);
}
int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl)
{
u32 i, page_idx;
int err;
wqs->dev_hdl = dev_hdl;
wqs->num_pages = WQ_NUM_PAGES(num_wqs);
if (alloc_page_addr(wqs)) {
sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n");
return -ENOMEM;
}
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
err = wqs_allocate_page(wqs, page_idx);
if (err) {
sdk_err(dev_hdl, "Failed wq page allocation\n");
goto wq_allocate_page_err;
}
}
wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL);
if (!wqs->free_blocks) {
err = -ENOMEM;
goto alloc_blocks_err;
}
init_wqs_blocks_arr(wqs);
return 0;
alloc_blocks_err:
wq_allocate_page_err:
for (i = 0; i < page_idx; i++)
wqs_free_page(wqs, i);
free_page_addr(wqs);
return err;
}
void hifc_wqs_free(struct hifc_wqs *wqs)
{
u32 page_idx;
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
wqs_free_page(wqs, page_idx);
free_page_addr(wqs);
kfree(wqs->free_blocks);
}
static void copy_wqe_to_shadow(struct hifc_wq *wq, void *shadow_addr,
int num_wqebbs, u16 prod_idx)
{
u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
u32 i, offset;
u16 idx;
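/* gather wqebbs that may cross a page boundary into the linear shadow
 * area, so the caller sees one contiguous wqe
 */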
for (i = 0; i < (u32)num_wqebbs; i++) {
offset = i * wq->wqebb_size;
shadow_wqebb_addr = (u8 *)shadow_addr + offset;
idx = MASKED_WQE_IDX(wq, prod_idx + i);
wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
wqebb_addr = wqe_page_addr +
WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size);
}
}
void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index)
{
return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index);
}
u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq)
{
return be64_to_cpu(*wq->block_vaddr);
}
void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx)
{
u32 curr_pg, end_pg;
u16 curr_prod_idx, end_prod_idx;
if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) {
atomic_add(num_wqebbs, &wq->delta);
return NULL;
}
/* use the raw cur_pi and end_pi here: no queue-depth mask is needed
 * because WQE_PAGE_NUM already masks by num_q_pages
 */
curr_prod_idx = (u16)wq->prod_idx;
wq->prod_idx += num_wqebbs;
/* the end prod index points to the last wqebb of the wqe,
 * hence the minus 1
 */
end_prod_idx = (u16)wq->prod_idx - 1;
curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
/* Even with a single page we still need the shadow wqe when
 * the wqe rolls over the page boundary
 */
if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
u32 offset = curr_pg * wq->max_wqe_size;
u8 *shadow_addr = wq->shadow_wqe + offset;
wq->shadow_idx[curr_pg] = *prod_idx;
return shadow_addr;
}
return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}
void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs)
{
atomic_add(num_wqebbs, &wq->delta);
wq->cons_idx += num_wqebbs;
}
void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx)
{
u32 curr_pg, end_pg;
u16 curr_cons_idx, end_cons_idx;
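/* delta counts free wqebbs, so q_depth - delta entries are outstanding;
 * reading num_wqebbs is only valid if at least that many were posted
 */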
if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
return NULL;
curr_cons_idx = (u16)wq->cons_idx;
curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
*cons_idx = curr_cons_idx;
if (curr_pg != end_pg) {
u32 offset = curr_pg * wq->max_wqe_size;
u8 *shadow_addr = wq->shadow_wqe + offset;
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
return shadow_addr;
}
return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
}
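
Taken together, the work-queue API above composes as follows. The sketch below is illustrative only: it assumes the driver's headers are in scope and that dev_hdl is a valid device handle, and the queue geometry (64-byte wqebb, 4 KB wq page, depth 256, 2-wqebb max wqe) is a hypothetical example rather than a value taken from this driver.

static int example_wq_setup(void *dev_hdl)
{
	struct hifc_wqs wqs = { 0 };
	struct hifc_wq wq = { 0 };
	u16 prod_idx = 0;
	void *wqe = NULL;
	int err;

	/* back one work queue with shared wqs pages */
	err = hifc_wqs_alloc(&wqs, 1, dev_hdl);
	if (err)
		return err;

	/* 64B wqebb, 4KB page, depth 256, max wqe of 2 wqebbs (128B) */
	err = hifc_wq_allocate(&wqs, &wq, 64, 4096, 256, 128);
	if (err)
		goto free_wqs;

	/* reserve one wqebb as a producer ... */
	wqe = hifc_get_wqe(&wq, 1, &prod_idx);
	/* ... and release it again once the entry has completed */
	if (wqe)
		hifc_put_wqe(&wq, 1);

	hifc_wq_free(&wqs, &wq);
free_wqs:
	hifc_wqs_free(&wqs);
	return err;
}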

164
hifc/hifc_wq.h Normal file
View File

@ -0,0 +1,164 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_WQ_H
#define HIFC_WQ_H
#define WQS_BLOCKS_PER_PAGE 4
#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)
#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
((wq)->num_q_pages - 1))
#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
((idx) & ((wq)->num_wqebbs_per_page - 1)))
#define WQ_PAGE_ADDR_SIZE sizeof(u64)
#define WQ_PAGE_ADDR_SIZE_SHIFT 3
#define WQ_PAGE_ADDR(wq, idx) \
(u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
(WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))
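/* WQ_PAGE_ADDR reads entry WQE_PAGE_NUM(idx) of the shadow block, which
 * holds the kernel virtual address of that queue page
 */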
#define WQ_BLOCK_SIZE 4096UL
#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
#define CMDQ_BLOCKS_PER_PAGE 8
#define CMDQ_BLOCK_SIZE 512UL
#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
CMDQ_BLOCK_SIZE), PAGE_SIZE)
#define ADDR_4K_ALIGNED(addr) (((addr) & 0xfff) == 0)
#define WQ_BASE_VADDR(wqs, wq) \
(u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_ADDR(wqs, wq) \
(u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
(u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
(((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
(u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
#define WQ_NUM_PAGES(num_wqs) \
(ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
#define MAX_WQE_SIZE(max_sge, wqebb_size) \
((max_sge <= 2) ? (wqebb_size) : \
((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size)))
struct hifc_free_block {
u32 page_idx;
u32 block_idx;
};
struct hifc_wq {
/* The addresses are 64 bit in the HW */
u64 block_paddr;
u64 *shadow_block_vaddr;
u64 *block_vaddr;
u32 wqebb_size;
u32 wq_page_size;
u16 q_depth;
u32 max_wqe_size;
u32 num_wqebbs_per_page;
/* performance: replace mul/div as shift;
* num_wqebbs_per_page must be power of 2
*/
u32 wqebbs_per_page_shift;
u32 page_idx;
u32 block_idx;
u32 num_q_pages;
struct hifc_dma_addr_align *mem_align;
int cons_idx;
int prod_idx;
atomic_t delta;
u16 mask;
u8 *shadow_wqe;
u16 *shadow_idx;
};
struct hifc_cmdq_pages {
/* The addresses are 64 bit in the HW */
u64 cmdq_page_paddr;
u64 *cmdq_page_vaddr;
u64 *cmdq_shadow_page_vaddr;
void *dev_hdl;
};
struct hifc_wqs {
/* The addresses are 64 bit in the HW */
u64 *page_paddr;
u64 **page_vaddr;
u64 **shadow_page_vaddr;
struct hifc_free_block *free_blocks;
u32 alloc_blk_pos;
u32 return_blk_pos;
int num_free_blks;
/* for allocate blocks */
spinlock_t alloc_blocks_lock;
u32 num_pages;
void *dev_hdl;
};
void hifc_wq_wqe_pg_clear(struct hifc_wq *wq);
int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, void *dev_hdl,
int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
u16 q_depth, u32 max_wqe_size);
void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, int cmdq_blocks);
int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl);
void hifc_wqs_free(struct hifc_wqs *wqs);
int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq,
u32 wqebb_size, u32 wq_page_size, u16 q_depth,
u32 max_wqe_size);
void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq);
void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index);
u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq);
void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx);
void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs);
void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx);
#endif
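
As a concrete reading of the indexing macros above: with an assumed geometry of 64-byte wqebbs and 64 wqebbs per page (wqebbs_per_page_shift == 6), index 130 lands on page 2 at byte offset 128. A minimal sketch, assuming the driver's types are in scope:

static u8 *example_wqebb_lookup(struct hifc_wq *wq, u16 idx)
{
	/* e.g. idx == 130 -> page (130 >> 6) & (num_q_pages - 1) == 2 */
	u32 page = WQE_PAGE_NUM(wq, idx);
	/* e.g. idx == 130 -> offset 64 * (130 & 63) == 128 bytes */
	u64 off = WQE_PAGE_OFF(wq, idx);

	(void)page;
	/* WQ_PAGE_ADDR resolves the page base via the shadow block */
	return WQ_PAGE_ADDR(wq, idx) + off;
}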

667
hifc/hifc_wqe.c Normal file
View File

@ -0,0 +1,667 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Fabric Channel Linux driver
* Copyright(c) 2018 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
#include "hifc_service.h"
void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl,
unsigned char v_task_len)
{
/* "BDSL" field of CtrlS - defines the size of BDS,
* which varies from 0 to 2040 bytes (8 bits of 8 bytes' chunk)
*/
v_ctrl_sl->ch.wd0.bdsl = 0;
/*
* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from
* 0 to 24 bytes
*/
v_ctrl_sl->ch.wd0.drv_sl = 0;
/* "WF" field of CtrlS - WQE Format:
 * b1 - linking WQE, used only in linked-page architecture instead of
 * ring; a special control WQE that contains no buffer or inline data
 * and is consumed by hardware only, size-aligned to WQEBB/WQE
 * b0 - normal WQE, either a normal SEG WQE or an inline-data WQE
 */
v_ctrl_sl->ch.wd0.wf = 0;
/*
* "CF" field of CtrlS - Completion Format - defines the format of CS.
* a.b0 - Status information is embedded inside of Completion Section
* b.b1 - Completion Section keeps SGL, where Status information
* should be written. (For the definition of SGLs see section 4.1.)
*/
v_ctrl_sl->ch.wd0.cf = 0;
/*
* "TSL" field of CtrlS - defines the size of TS, which varies from 0
* to 248 bytes
*/
v_ctrl_sl->ch.wd0.tsl = v_task_len;
/*
 * Variable length SGE (vSGE). The size of an SGE is 16 bytes. The vSGE
 * format has two types, selected by the "VA" field of CtrlS ("VA"
 * stands for Virtual Address):
 * b0 - SGE comprises a 64-bit buffer pointer and a 31-bit length; each
 * SGE supports at most 2G-1B, so a single SGE can never exceed 2GB by
 * nature. A byte count value of zero means a 0-byte data transfer.
 * b1 - SGE comprises a 64-bit buffer pointer, a 31-bit length and a
 * 30-bit key of the translation table, with the same 2G-1B per-SGE
 * limit. A byte count value of zero means a 0-byte data transfer.
 */
v_ctrl_sl->ch.wd0.va = 0;
/*
* "DF" field of CtrlS - Data Format - defines the format of BDS
* a. b0 - BDS carries the list of SGEs (SGL)
* b. b1 - BDS carries the inline data
*/
v_ctrl_sl->ch.wd0.df = 0;
/*
* "CR" - Completion is Required - marks CQE generation request per WQE
*/
v_ctrl_sl->ch.wd0.cr = 1;
/*
* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from
* 0 to 56 bytes
*/
v_ctrl_sl->ch.wd0.dif_sl = 0;
/*
* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to
* 24 bytes
*/
v_ctrl_sl->ch.wd0.csl = 0;
/* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks.
*The value Zero is not valid
*/
v_ctrl_sl->ch.wd0.ctrl_sl = 1;
/* "O" - Owner - marks ownership of WQE */
v_ctrl_sl->ch.wd0.owner = 0;
}
void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe)
{
/* "BDSL" field of CtrlS - defines the size of BDS, which varies from
* 0 to 2040 bytes (8 bits of 8 bytes' chunk)
*/
/* A TRD/TWR WQE carries 2 SGEs by default, 4 DWs per SGE; the value is
 * 4 because the unit is 2 DWs (in double-SGL mode, bdsl is 2)
 */
v_sqe->ctrl_sl.ch.wd0.bdsl = HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE;
/*
* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from
* 0 to 24 bytes DrvSL config for 0
*/
v_sqe->ctrl_sl.ch.wd0.drv_sl = 0;
/* "WF" field of CtrlS - WQE Format:
 * b1 - linking WQE, used only in linked-page architecture instead of
 * ring; a special control WQE that contains no buffer or inline data
 * and is consumed by hardware only, size-aligned to WQEBB/WQE
 * b0 - normal WQE, either a normal SEG WQE or an inline-data WQE
 */
/* normal wqe */
v_sqe->ctrl_sl.ch.wd0.wf = 0;
/*
* "CF" field of CtrlS - Completion Format - defines the format of CS.
* a.b0 - Status information is embedded inside of Completion Section
* b.b1 - Completion Section keeps SGL, where Status information
* should be written. (For the definition of SGLs see section 4.1.)
*/
/* by SCQE mode, the value is ignored */
v_sqe->ctrl_sl.ch.wd0.cf = 0;
/* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to
* 248 bytes
*/
/* TSL is configured by 56 bytes */
v_sqe->ctrl_sl.ch.wd0.tsl = sizeof(struct hifcoe_sqe_ts_s) /
HIFC_WQE_SECTION_CHUNK_SIZE;
/*
 * Variable length SGE (vSGE). The size of an SGE is 16 bytes. The vSGE
 * format has two types, selected by the "VA" field of CtrlS ("VA"
 * stands for Virtual Address):
 * b0 - SGE comprises a 64-bit buffer pointer and a 31-bit length; each
 * SGE supports at most 2G-1B, so a single SGE can never exceed 2GB by
 * nature. A byte count value of zero means a 0-byte data transfer.
 * b1 - SGE comprises a 64-bit buffer pointer, a 31-bit length and a
 * 30-bit key of the translation table, with the same 2G-1B per-SGE
 * limit. A byte count value of zero means a 0-byte data transfer.
 */
v_sqe->ctrl_sl.ch.wd0.va = 0;
/*
* "DF" field of CtrlS - Data Format - defines the format of BDS
* a. b0 - BDS carries the list of SGEs (SGL)
* b. b1 - BDS carries the inline data
*/
v_sqe->ctrl_sl.ch.wd0.df = 0;
/* "CR" - Completion is Required marks CQE generation request per WQE */
/* by SCQE mode, this value is ignored */
v_sqe->ctrl_sl.ch.wd0.cr = 1;
/*
* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from
* 0 to 56 bytes.
*/
v_sqe->ctrl_sl.ch.wd0.dif_sl = 0;
/*
* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to
* 24 bytes
*/
v_sqe->ctrl_sl.ch.wd0.csl = 0;
/* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks.
* The value Zero is not valid.
*/
v_sqe->ctrl_sl.ch.wd0.ctrl_sl = HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE;
/* "O" - Owner - marks ownership of WQE */
v_sqe->ctrl_sl.ch.wd0.owner = 0;
}
void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts,
unsigned int rport_index,
unsigned short local_xid,
unsigned short remote_xid,
unsigned short data_len)
{
v_sqe_ts->local_xid = local_xid;
v_sqe_ts->wd0.conn_id = (unsigned short)rport_index;
v_sqe_ts->wd0.remote_xid = remote_xid;
v_sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len;
}
void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr,
unsigned long long v_phy_addr,
unsigned int buf_len,
unsigned int xid, void *v_hba)
{
unsigned long long els_rsp_phy_addr;
struct hifcoe_variable_sge_s *psge = NULL;
/* Fill in SGE and convert it to big-endian. */
psge = &v_sqe->sge[0];
els_rsp_phy_addr = v_phy_addr;
psge->buf_addr_hi = HIFC_HIGH_32_BITS(els_rsp_phy_addr);
psge->buf_addr_lo = HIFC_LOW_32_BITS(els_rsp_phy_addr);
psge->wd0.buf_len = buf_len;
psge->wd0.r_flag = 0;
psge->wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG;
psge->wd1.buf_addr_gpa = (psge->buf_addr_lo >> 16);
psge->wd1.xid = (xid & 0x3fff);
psge->wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
hifc_cpu_to_big32(psge, sizeof(*psge));
/* Convert the FC frame payload to big-endian. */
hifc_cpu_to_big32(v_buf_addr, buf_len);
}
void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
void *v_frame_pld, unsigned short type,
unsigned short cmnd, unsigned int v_scqn)
{
struct unf_pril_payload_s *pri_acc_pld = NULL;
struct hifcoe_sqe_els_rsp_s *els_rsp = NULL;
struct hifcoe_sqe_ts_s *sqe_ts = NULL;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifc_hba_s *hba = NULL;
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sqe, return);
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_frame_pld, return);
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sq_info, return);
sqe_ts = &v_sqe->ts_sl;
els_rsp = &sqe_ts->cont.els_rsp;
sqe_ts->task_type = HIFC_SQE_ELS_RSP;
/* By default the chip does not need to update the parameters. */
els_rsp->wd1.para_update = 0x0;
sq_info = (struct hifc_parent_sq_info_s *)v_sq_info;
hba = (struct hifc_hba_s *)sq_info->phba;
/* When the PLOGI request is sent, the microcode needs to be instructed
* to clear the I/O related to the link to avoid data inconsistency
* caused by out-of-order I/O.
*/
if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) {
els_rsp->wd1.clr_io = 1;
els_rsp->wd6.reset_exch_start = hba->exit_base;
els_rsp->wd6.reset_exch_end = hba->exit_base +
(hba->exit_count - 1);
els_rsp->wd7.scqn = v_scqn;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.",
sq_info->local_port_id,
cmnd,
sq_info->remote_port_id,
sq_info->rport_index,
els_rsp->wd6.reset_exch_start,
els_rsp->wd6.reset_exch_end,
v_scqn);
return;
}
if (type == ELS_RJT)
return;
/*
 * Write the PRLI ACC negotiation parameters into the WQE and set the
 * para_update flag accordingly.
 */
if (cmnd == ELS_PRLI) {
/* The chip updates the PLOGI ACC negotiation parameters. */
els_rsp->wd2.seq_cnt = sq_info->plogi_coparams.seq_cnt;
els_rsp->wd2.e_d_tov = sq_info->plogi_coparams.ed_tov;
els_rsp->wd2.tx_mfs = sq_info->plogi_coparams.tx_mfs;
els_rsp->e_d_tov_timer_val =
sq_info->plogi_coparams.ed_tov_timer_val;
/* The chip updates the PRLI ACC parameter. */
pri_acc_pld = (struct unf_pril_payload_s *)v_frame_pld;
els_rsp->wd4.xfer_dis = HIFC_GET_PRLI_PARAM_WXFER(
pri_acc_pld->parms);
els_rsp->wd4.conf = HIFC_GET_PRLI_PARAM_CONF(
pri_acc_pld->parms);
els_rsp->wd4.rec = HIFC_GET_PRLI_PARAM_REC(pri_acc_pld->parms);
els_rsp->wd1.para_update = 0x03;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x,xfer_dis:0x%x, conf:0x%x,rec:0x%x.",
sq_info->local_port_id,
sq_info->rport_index, els_rsp->wd2.seq_cnt,
els_rsp->wd2.e_d_tov, els_rsp->wd2.tx_mfs,
els_rsp->e_d_tov_timer_val, els_rsp->wd4.xfer_dis,
els_rsp->wd4.conf, els_rsp->wd4.rec);
}
}
void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
unsigned short cmnd, unsigned int v_scqn,
void *v_frame_pld)
{
struct hifcoe_sqe_ts_s *v_sqe_ts = NULL;
struct hifcoe_sqe_t_els_gs_s *els_req = NULL;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifc_hba_s *hba = NULL;
struct unf_rec_pld_s *rec_pld = NULL;
v_sqe_ts = &v_sqe->ts_sl;
v_sqe_ts->task_type = HIFC_SQE_ELS_CMND;
els_req = &v_sqe_ts->cont.t_els_gs;
sq_info = (struct hifc_parent_sq_info_s *)v_sq_info;
hba = (struct hifc_hba_s *)sq_info->phba;
/*
* When the PLOGI request is sent, the microcode needs to be instructed
* to clear the I/O related to the link to avoid data inconsistency
* caused by out-of-order I/O.
*/
if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) {
els_req->wd4.clr_io = 1;
els_req->wd6.reset_exch_start = hba->exit_base;
els_req->wd6.reset_exch_end = hba->exit_base +
(hba->exit_count - 1);
els_req->wd7.scqn = v_scqn;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.",
hba->port_cfg.port_id, sq_info->rport_index,
sq_info->local_port_id,
(cmnd == ELS_PLOGI) ? "PLOGI" : "LOGO",
sq_info->remote_port_id,
els_req->wd6.reset_exch_start,
els_req->wd6.reset_exch_end,
v_scqn);
return;
}
/* The chip updates the PLOGI ACC negotiation parameters. */
if (cmnd == ELS_PRLI) {
els_req->wd5.seq_cnt = sq_info->plogi_coparams.seq_cnt;
els_req->wd5.e_d_tov = sq_info->plogi_coparams.ed_tov;
els_req->wd5.tx_mfs = sq_info->plogi_coparams.tx_mfs;
els_req->e_d_tov_timer_val =
sq_info->plogi_coparams.ed_tov_timer_val;
els_req->wd4.rec_support = hba->port_cfg.tape_support ? 1 : 0;
els_req->wd4.para_update = 0x01;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x, e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.",
sq_info->local_port_id, sq_info->rport_index,
els_req->wd5.seq_cnt, els_req->wd5.e_d_tov,
els_req->wd5.tx_mfs,
els_req->e_d_tov_timer_val);
}
if (cmnd == ELS_ECHO)
els_req->echo_flag = UNF_TRUE;
if (cmnd == ELS_REC) {
rec_pld = (struct unf_rec_pld_s *)v_frame_pld;
els_req->wd4.rec_flag = 1;
rec_pld->ox_id += hba->exit_base;
els_req->wd4.orign_oxid = rec_pld->ox_id;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_oxid 0x%x",
hba->port_cfg.port_id, sq_info->rport_index,
sq_info->local_port_id,
sq_info->remote_port_id,
els_req->wd4.orign_oxid);
}
}
void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe,
unsigned short els_cmnd_type,
unsigned int v_magic_num)
{
struct hifcoe_sqe_t_els_gs_s *els_req;
struct hifcoe_sqe_els_rsp_s *els_rsp;
if (els_cmnd_type == ELS_ACC || els_cmnd_type == ELS_RJT) {
els_rsp = &v_sqe->ts_sl.cont.els_rsp;
els_rsp->magic_num = v_magic_num;
} else {
els_req = &v_sqe->ts_sl.cont.t_els_gs;
els_req->magic_num = v_magic_num;
}
}
void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int magic_num)
{
struct hifcoe_sqe_ts_s *v_sqe_ts = NULL;
struct hifcoe_sqe_t_els_gs_s *gs_req = NULL;
v_sqe_ts = &v_sqe->ts_sl;
v_sqe_ts->task_type = HIFC_SQE_GS_CMND;
gs_req = &v_sqe_ts->cont.t_els_gs;
gs_req->magic_num = magic_num;
}
void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int abts_param,
unsigned int magic_num)
{
struct hifcoe_sqe_abts_s *abts_ts;
v_sqe->ts_sl.task_type = HIFC_SQE_BLS_CMND;
abts_ts = &v_sqe->ts_sl.cont.abts;
abts_ts->fh_parm_abts = abts_param;
abts_ts->magic_num = magic_num;
}
void hifc_build_service_wqe_root_ts(void *v_hba,
struct hifc_root_sqe_s *v_rt_sqe,
unsigned int rx_id, unsigned int rport_id,
unsigned int scq_num)
{
unsigned char data_cos = 0;
unsigned int port_id = 0;
unsigned int service_type = 0;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_queue_info_s *parent_queue_info = NULL;
hba = (struct hifc_hba_s *)v_hba;
port_id = HIFC_GET_HBA_PORT_ID(hba);
service_type = HIFC_GET_SERVICE_TYPE(hba);
if (rport_id >= UNF_HIFC_MAXRPORT_NUM) {
data_cos = HIFC_GET_PACKET_COS(service_type);
} else {
parent_queue_info =
&hba->parent_queue_mgr->parent_queues[rport_id];
data_cos = parent_queue_info->queue_data_cos;
}
v_rt_sqe->task_section.fc_dw0.exch_id = rx_id;
v_rt_sqe->task_section.fc_dw0.host_id = 0;
v_rt_sqe->task_section.fc_dw0.port_id = port_id;
v_rt_sqe->task_section.fc_dw0.off_load = HIFC_NO_OFFLOAD;
v_rt_sqe->task_section.fc_dw3.rport_index = HIFC_LSW(rport_id);
v_rt_sqe->task_section.fc_dw3.scq_num = HIFC_LSW(scq_num);
v_rt_sqe->task_section.fc_dw4.service_type = UNF_GET_SHIFTMASK(
service_type, 0, 0x1f);
v_rt_sqe->task_section.fc_dw4.pkt_type = HIFC_GET_PACKET_TYPE(
service_type);
v_rt_sqe->task_section.fc_dw4.pkt_cos = data_cos;
}
void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe,
void *v_buf_addr,
unsigned long long v_phy_addr,
unsigned int buf_len,
void *v_hba)
{
unsigned long long frame_phy_addr;
/* Fill in the SGE and convert it to big-endian. */
frame_phy_addr = v_phy_addr;
v_rt_sqe->sge.buf_addr_hi = HIFC_HIGH_32_BITS(frame_phy_addr);
v_rt_sqe->sge.buf_addr_lo = HIFC_LOW_32_BITS(frame_phy_addr);
v_rt_sqe->sge.wd0.buf_len = buf_len;
v_rt_sqe->sge.wd0.ext_flag = 0;
v_rt_sqe->sge.wd1.rsvd = 0;
hifc_cpu_to_big32(&v_rt_sqe->sge, sizeof(v_rt_sqe->sge));
/* Convert the FC frame payload to big-endian */
hifc_cpu_to_big32(v_buf_addr, buf_len);
}
void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe,
unsigned long long v_ctxt_addr,
unsigned int buf_len)
{
/* The SGE is filled in and converted to the big-endian mode. */
v_rt_sqe->ctx_sge.buf_addr_hi = HIFC_HIGH_32_BITS(v_ctxt_addr);
v_rt_sqe->ctx_sge.buf_addr_lo = HIFC_LOW_32_BITS(v_ctxt_addr);
v_rt_sqe->ctx_sge.wd0.buf_len = buf_len;
v_rt_sqe->ctx_sge.wd0.ext_flag = 0;
v_rt_sqe->ctx_sge.wd1.rsvd = 0;
hifc_cpu_to_big32(&v_rt_sqe->ctx_sge, sizeof(v_rt_sqe->ctx_sge));
}
void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe,
dma_addr_t ctxt_addr,
unsigned int xid)
{
/* update Task Section DW0.OFFLOAD */
v_rt_sqe->task_section.fc_dw0.off_load = HIFC_HAVE_OFFLOAD;
/* update Context GPA DW1~2 */
v_rt_sqe->task_section.fc_dw1.context_gpa_hi =
HIFC_HIGH_32_BITS(ctxt_addr);
v_rt_sqe->task_section.fc_dw2.context_gpa_lo =
HIFC_LOW_32_BITS(ctxt_addr);
/* fill Context DW4 */
v_rt_sqe->task_section.fc_dw4.parent_xid = xid;
v_rt_sqe->task_section.fc_dw4.csize = HIFC_CNTX_SIZE_T_256B;
/* The sqe of the offload request has two sge. The first is the packet,
* and the second is the ctx.
*/
v_rt_sqe->ctrl_section.ch.wd0.bdsl =
2 * HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s));
}
void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned int ts_size,
unsigned int bdsi)
{
v_wqe_cs->ch.wd0.bdsl = bdsi;
v_wqe_cs->ch.wd0.drv_sl = 0;
v_wqe_cs->ch.wd0.rsvd0 = 0;
v_wqe_cs->ch.wd0.wf = 0;
v_wqe_cs->ch.wd0.cf = 0;
v_wqe_cs->ch.wd0.tsl = ts_size;
v_wqe_cs->ch.wd0.va = 0;
v_wqe_cs->ch.wd0.df = 0;
v_wqe_cs->ch.wd0.cr = 1;
v_wqe_cs->ch.wd0.dif_sl = 0;
v_wqe_cs->ch.wd0.csl = 0;
/* divided by 8 */
v_wqe_cs->ch.wd0.ctrl_sl = HIFC_BYTES_TO_QW_NUM(sizeof(*v_wqe_cs));
v_wqe_cs->ch.wd0.owner = 0;
}
void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned short owner,
unsigned short pmsn)
{
v_wqe_cs->qsf.wqe_sn = pmsn;
v_wqe_cs->qsf.dump_wqe_sn = v_wqe_cs->qsf.wqe_sn;
v_wqe_cs->ch.wd0.owner = (unsigned int)owner;
}
void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe)
{
if (likely((v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TRESP) &&
(v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TMF_RESP))) {
/*
 * Convert the Control Section and Task Section to big-endian; the
 * SGEs were already converted to big-endian by the upper-layer
 * driver before they entered the queue.
 */
hifc_cpu_to_big32(&v_sqe->ctrl_sl, sizeof(v_sqe->ctrl_sl));
hifc_cpu_to_big32(&v_sqe->ts_sl, sizeof(v_sqe->ts_sl));
} else {
/*
 * HIFCOE_TASK_T_TRESP may use the SGE as the Task Section, so
 * convert the entire SQE to big-endian.
 */
hifc_cpu_to_big32(v_sqe, sizeof(struct hifcoe_sqe_tresp_s));
}
}
void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe)
{
hifc_cpu_to_big32(&v_sqe->ctrl_section, sizeof(v_sqe->ctrl_section));
hifc_cpu_to_big32(&v_sqe->task_section, sizeof(v_sqe->task_section));
}
void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe,
enum hifcoe_task_type_e task_type,
unsigned short rx_id)
{
cmdqe->common.wd0.task_type = task_type;
cmdqe->common.wd0.rx_id = rx_id;
cmdqe->common.wd0.rsvd0 = 0;
}
#define HIFC_STANDARD_SIRT_ENABLE 1
#define HIFC_STANDARD_SIRT_DISABLE 0
#define HIFC_UNKNOWN_ID 0xFFFF
void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
unsigned char task_type,
unsigned short exit_base,
unsigned char v_port_idx)
{
v_sqe->ts_sl.local_xid = UNF_GET_OXID(v_pkg) + exit_base;
v_sqe->ts_sl.task_type = task_type;
v_sqe->ts_sl.wd0.conn_id =
(unsigned short)(v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]);
v_sqe->ts_sl.wd0.remote_xid = HIFC_UNKNOWN_ID;
}
void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_ts_s *v_sqe_ts)
{
struct hifcoe_sqe_icmnd_s *icmd = &v_sqe_ts->cont.icmnd;
void *phy_add = NULL;
struct hifc_hba_s *hba = NULL;
hba = (struct hifc_hba_s *)v_hba;
v_sqe_ts->cdb_type = 0;
memcpy(icmd->fcp_cmnd_iu, v_pkg->fcp_cmnd,
sizeof(struct unf_fcp_cmnd_s));
icmd->magic_num = UNF_GETXCHGALLOCTIME(v_pkg);
if (v_pkg->unf_rsp_pload_bl.buffer_ptr) {
phy_add = (void *)v_pkg->unf_rsp_pload_bl.buf_dma_addr;
icmd->rsp_gpa_hi = HIFC_HIGH_32_BITS(phy_add);
icmd->rsp_gpa_lo = HIFC_LOW_32_BITS(phy_add);
} else {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]INI Build WQE sense buffer should not be null,sid_did (0x%x_0x%x) oxid(0x%x) pkg type(0x%x) hot pool tag(0x%x).",
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did,
UNF_GET_OXID(v_pkg),
v_pkg->type, UNF_GET_XCHG_TAG(v_pkg));
}
if (v_sqe_ts->task_type == HIFC_SQE_FCP_ITMF) {
icmd->info.tmf.w0.bs.reset_exch_start = hba->exit_base;
icmd->info.tmf.w0.bs.reset_exch_end = hba->exit_base +
hba->exit_count - 1;
icmd->info.tmf.w1.bs.reset_did = UNF_GET_DID(v_pkg);
/* delivers the marker status flag to the microcode. */
icmd->info.tmf.w1.bs.marker_sts = 1;
HIFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS(
v_pkg->fcp_cmnd->control),
icmd->info.tmf.w1.bs.reset_type);
icmd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(v_pkg);
memcpy(icmd->info.tmf.reset_lun, v_pkg->fcp_cmnd->lun,
sizeof(icmd->info.tmf.reset_lun));
}
}
void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe,
unsigned short owner,
unsigned short pmsn)
{
struct hifcoe_wqe_ctrl_ch_s *wqe_ctrls = NULL;
wqe_ctrls = &v_rqe->ctrl_sl.ch;
wqe_ctrls->wd0.owner = owner;
wqe_ctrls->wd0.ctrl_sl = sizeof(struct hifcoe_wqe_ctrl_s) >> 3;
wqe_ctrls->wd0.csl = 1;
wqe_ctrls->wd0.dif_sl = 0;
wqe_ctrls->wd0.cr = 1;
wqe_ctrls->wd0.df = 0;
wqe_ctrls->wd0.va = 0;
wqe_ctrls->wd0.tsl = 0;
wqe_ctrls->wd0.cf = 0;
wqe_ctrls->wd0.wf = 0;
wqe_ctrls->wd0.drv_sl = sizeof(struct hifcoe_rqe_drv_s) >> 3;
wqe_ctrls->wd0.bdsl = sizeof(struct hifcoe_constant_sge_s) >> 3;
v_rqe->ctrl_sl.wd0.wqe_msn = pmsn;
v_rqe->ctrl_sl.wd0.dump_wqe_msn = v_rqe->ctrl_sl.wd0.wqe_msn;
}
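
For orientation, the builder helpers above are combined in a fixed order when a parent-SQ ELS request is assembled. The sketch below is illustrative only, not driver code: all parameters are assumed to have been prepared by the caller, and error handling is omitted.

static void example_build_els_plogi(struct hifcoe_sqe_s *sqe, void *sq_info,
				    void *frame_pld,
				    unsigned long long pld_dma,
				    unsigned int pld_len, unsigned int xid,
				    void *hba, unsigned int scqn)
{
	/* task section: ELS command, with PLOGI clean-io hints */
	hifc_build_els_wqe_ts_req(sqe, sq_info, ELS_PLOGI, scqn, frame_pld);

	/* SGE points at the frame payload; both become big-endian here */
	hifc_build_els_gs_wqe_sge(sqe, frame_pld, pld_dma, pld_len, xid, hba);

	/* finally convert the control and task sections to big-endian */
	hifc_convert_parent_wqe_to_big_endian(sqe);
}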

486
hifc/hifc_wqe.h Normal file
View File

@ -0,0 +1,486 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_WQE_H__
#define __HIFC_WQE_H__
#include "hifcoe_wqe.h"
#include "hifcoe_parent_context.h"
/* TGT WQE type */
/* DRV->uCode via Root or Parent SQ */
#define HIFC_SQE_FCP_TRD HIFCOE_TASK_T_TREAD
#define HIFC_SQE_FCP_TWR HIFCOE_TASK_T_TWRITE
#define HIFC_SQE_FCP_TRSP HIFCOE_TASK_T_TRESP
#define HIFC_SQE_FCP_TACK HIFCOE_TASK_T_TACK
#define HIFC_SQE_ELS_CMND HIFCOE_TASK_T_ELS
#define HIFC_SQE_ELS_RSP HIFCOE_TASK_T_ELS_RSP
#define HIFC_SQE_GS_CMND HIFCOE_TASK_T_GS
#define HIFC_SQE_BLS_CMND HIFCOE_TASK_T_ABTS
#define HIFC_SQE_FCP_IREAD HIFCOE_TASK_T_IREAD
#define HIFC_SQE_FCP_IWRITE HIFCOE_TASK_T_IWRITE
#define HIFC_SQE_FCP_ITMF HIFCOE_TASK_T_ITMF
#define HIFC_SQE_SESS_RST HIFCOE_TASK_T_SESS_RESET
#define HIFC_SQE_FCP_TMF_TRSP HIFCOE_TASK_T_TMF_RESP
/* DRV->uCode Via CMDQ */
#define HIFC_CMDQE_ABTS_RSP HIFCOE_TASK_T_ABTS_RSP
#define HIFC_CMDQE_ABORT HIFCOE_TASK_T_ABORT
#define HIFC_CMDQE_SESS_DIS HIFCOE_TASK_T_SESS_DIS
#define HIFC_CMDQE_SESS_DEL HIFCOE_TASK_T_SESS_DEL
/* uCode->Drv Via CMD SCQ */
#define HIFC_SCQE_FCP_TCMND HIFCOE_TASK_T_RCV_TCMND
#define HIFC_SCQE_ELS_CMND HIFCOE_TASK_T_RCV_ELS_CMD
#define HIFC_SCQE_ABTS_CMD HIFCOE_TASK_T_RCV_ABTS_CMD
#define HIFC_SCQE_FCP_IRSP HIFCOE_TASK_T_IRESP
#define HIFC_SCQE_FCP_ITMF_RSP HIFCOE_TASK_T_ITMF_RESP
/* uCode->Drv Via STS SCQ */
#define HIFC_SCQE_FCP_TSTS HIFCOE_TASK_T_TSTS
#define HIFC_SCQE_GS_RSP HIFCOE_TASK_T_RCV_GS_RSP
#define HIFC_SCQE_ELS_RSP HIFCOE_TASK_T_RCV_ELS_RSP
#define HIFC_SCQE_ABTS_RSP HIFCOE_TASK_T_RCV_ABTS_RSP
#define HIFC_SCQE_ELS_RSP_STS HIFCOE_TASK_T_ELS_RSP_STS
#define HIFC_SCQE_ABTS_RSP_STS HIFCOE_TASK_T_ABTS_RSP_STS
#define HIFC_SCQE_ABORT_STS HIFCOE_TASK_T_ABORT_STS
#define HIFC_SCQE_SESS_EN_STS HIFCOE_TASK_T_SESS_EN_STS
#define HIFC_SCQE_SESS_DIS_STS HIFCOE_TASK_T_SESS_DIS_STS
#define HIFC_SCQE_SESS_DEL_STS HIFCOE_TASK_T_SESS_DEL_STS
#define HIFC_SCQE_SESS_RST_STS HIFCOE_TASK_T_SESS_RESET_STS
#define HIFC_SCQE_ITMF_MARKER_STS HIFCOE_TASK_T_ITMF_MARKER_STS
#define HIFC_SCQE_ABTS_MARKER_STS HIFCOE_TASK_T_ABTS_MARKER_STS
#define HIFC_SCQE_FLUSH_SQ_STS HIFCOE_TASK_T_FLUSH_SQ_STS
#define HIFC_SCQE_BUF_CLEAR_STS HIFCOE_TASK_T_BUFFER_CLEAR_STS
#define HIFC_SCQE_CLEAR_SRQ_STS HIFCOE_TASK_T_CLEAR_SRQ_STS
#define HIFC_LOW_32_BITS(__addr) \
((unsigned int)((unsigned long long)(__addr) & 0xffffffff))
#define HIFC_HIGH_32_BITS(__addr)\
((unsigned int)(((unsigned long long)(__addr) >> 32) & 0xffffffff))
/* Error Code from SCQ */
#define HIFC_COMPLETION_STATUS_SUCCESS FCOE_CQE_COMPLETED
#define HIFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FCOE_IMMI_CMDPKT_SETUP_FAIL
#define HIFC_COMPLETION_STATUS_TIMEOUT FCOE_ERROR_CODE_E_D_TIMER_EXPIRE
#define HIFC_COMPLETION_STATUS_DIF_ERROR FCOE_ERROR_CODE_DATA_DIFX_FAILED
#define HIFC_COMPLETION_STATUS_DATA_OOO FCOE_ERROR_CODE_DATA_OOO_RO
#define HIFC_COMPLETION_STATUS_DATA_OVERFLOW \
FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS
#define HIFC_SCQE_INVALID_CONN_ID 0xffff
#define HIFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type)
#define HIFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code)
#define HIFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt)
#define HIFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id)
#define HIFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type)
#define HIFC_WQE_IS_IO(wqe) \
(HIFC_GET_WQE_TYPE(wqe) != HIFC_SQE_SESS_RST)
#define HIFC_SCQE_HAS_ERRCODE(scqe) \
(HIFC_GET_SCQE_STATUS(scqe) != HIFC_COMPLETION_STATUS_SUCCESS)
#define HIFC_SCQE_ERR_TO_CM(scqe)\
(HIFC_GET_SCQE_STATUS(scqe) != FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL)
#define HIFC_SCQE_CONN_ID_VALID(scqe) \
(HIFC_GET_SCQE_CONN_ID(scqe) != HIFC_SCQE_INVALID_CONN_ID)
#define HIFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */
#define HIFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */
#define HIFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */
#define HIFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */
#define HIFC_WQE_SGE_LAST_FLAG 1
#define HIFC_WQE_SGE_NOT_LAST_FLAG 0
#define HIFC_WQE_SGE_EXTEND_FLAG 1
#define HIFC_WQE_SGE_NOT_EXTEND_FLAG 0
#define HIFC_FCP_TMF_PORT_RESET 0
#define HIFC_FCP_TMF_LUN_RESET 1
#define HIFC_FCP_TMF_TGT_RESET 2
#define HIFC_FCP_TMF_RSVD 3
#define HIFC_NO_OFFLOAD 0
#define HIFC_HAVE_OFFLOAD 1
#define HIFC_QID_SQ 0
#define HIFC_ADJUST_DATA(old_val, new_val) ((old_val) = (new_val))
#define HIFC_GET_RESET_TYPE(tmf_flag, reset_flag) \
do { \
switch (tmf_flag) { \
case UNF_FCP_TM_ABORT_TASK_SET: \
case UNF_FCP_TM_LOGICAL_UNIT_RESET: \
reset_flag = HIFC_FCP_TMF_LUN_RESET; \
break; \
case UNF_FCP_TM_TARGET_RESET: \
reset_flag = HIFC_FCP_TMF_TGT_RESET; \
break; \
case UNF_FCP_TM_CLEAR_TASK_SET: \
reset_flag = HIFC_FCP_TMF_PORT_RESET; \
break; \
default: \
reset_flag = HIFC_FCP_TMF_RSVD; \
} \
} while (0)
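/*
 * Usage sketch (illustrative only): mapping an FCP task-management flag
 * to the chip encoding, mirroring the use in hifc_build_icmnd_wqe_ts:
 *
 *	unsigned int reset_type;
 *
 *	HIFC_GET_RESET_TYPE(UNF_FCP_TM_TARGET_RESET, reset_type);
 *	now reset_type == HIFC_FCP_TMF_TGT_RESET
 */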
/*
* nic_wqe_ctrl_sec table define
*/
struct nic_wqe_ctrl_sec {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* marks ownership of WQE */
u32 owner : 1;
/* Control Section Length */
u32 ctrl_sec_len : 2;
/* Completion Section Length */
u32 completion_sec_len : 2;
/* DIF Section Length */
u32 dif_sec_len : 3;
/*
* Completion is Required - marks CQE generation request
* per WQE
*/
u32 cr : 1;
/* Data Format - format of BDS */
u32 df : 1;
/* Virtual Address */
u32 va : 1;
/* Task Section Length */
u32 task_sec_len : 5;
/* Completion Format */
u32 cf : 1;
u32 wf : 1;
/* reserved */
u32 rsvd : 4;
/* Driver Section Length */
u32 drv_sec_len : 2;
/* Buffer Descriptors Section Length */
u32 buf_desc_sec_len : 8;
#else
/* Buffer Descriptors Section Length */
u32 buf_desc_sec_len : 8;
/* Driver Section Length */
u32 drv_sec_len : 2;
/* reserved */
u32 rsvd : 4;
u32 wf : 1;
/* Completion Format */
u32 cf : 1;
/* Task Section Length */
u32 task_sec_len : 5;
/* Virtual Address */
u32 va : 1;
/* Data Format - format of BDS */
u32 df : 1;
/*
* Completion is Required - marks CQE generation request
* per WQE
*/
u32 cr : 1;
/* DIF Section Length */
u32 dif_sec_len : 3;
/* Completion Section Length */
u32 completion_sec_len : 2;
/* Control Section Length */
u32 ctrl_sec_len : 2;
/* marks ownership of WQE */
u32 owner : 1;
#endif
} bs;
u32 dw;
};
};
/*
* nic_rq_sge_sec table define
*/
struct nic_rq_sge_sec {
/* packet buffer address high */
u32 wb_addr_high;
/* packet buffer address low */
u32 wb_addr_low;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd : 1;
/* SGE length */
u32 length : 31;
#else
/* SGE length */
u32 length : 31;
u32 rsvd : 1;
#endif
} bs0;
u32 dw0;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* 0:list,1:last */
u32 list : 1;
/* 0:normal,1:pointer to next SGE */
u32 extension : 1;
/* key or unused */
u32 key : 30;
#else
/* key or unused */
u32 key : 30;
/* 0:normal,1:pointer to next SGE */
u32 extension : 1;
/* 0:list,1:last */
u32 list : 1;
#endif
} bs1;
u32 dw1;
};
};
/*
* nic_rq_bd_sec table define
*/
struct nic_rq_bd_sec {
/* packet buffer address high */
u32 pkt_buf_addr_high;
/* packet buffer address low */
u32 pkt_buf_addr_low;
};
/*
* nic_rq_wqe table define
*/
struct nic_rq_wqe {
struct nic_wqe_ctrl_sec rq_wqe_ctrl_sec;
u32 rsvd;
struct nic_rq_sge_sec rx_sge;
struct nic_rq_bd_sec pkt_buf_addr;
};
/* Link WQE structure */
struct hifc_link_wqe_s {
union {
struct {
unsigned int rsv1 : 14;
unsigned int wf : 1;
unsigned int rsv2 : 14;
unsigned int ctrlsl : 2;
unsigned int o : 1;
} wd0;
u32 val_wd0;
};
union {
struct {
unsigned int msn : 16;
unsigned int dump_msn : 15;
/* lp indicates whether the O bit has flipped (wrapped) */
unsigned int lp : 1;
} wd1;
unsigned int val_wd1;
};
unsigned int next_page_addr_hi;
unsigned int next_page_addr_lo;
};
struct hifc_root_rq_complet_info_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int done : 1; /* done bit,ucode will set to 1 */
unsigned int rsvd1 : 6;
unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */
unsigned int rsvd2 : 24;
#else
unsigned int rsvd2 : 24;
unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */
unsigned int rsvd1 : 6;
unsigned int done : 1; /* done bit,ucode will set to 1 */
#endif
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short buf_length;
unsigned short exch_id;
#else
unsigned short exch_id;
unsigned short buf_length;
#endif
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short sts_only; /* If only CMPL SECTION */
unsigned short status; /* 0:no err;!0:others */
#else
unsigned short status; /* 0:no err;!0:others */
unsigned short sts_only; /* If only CMPL SECTION */
#endif
unsigned int magic_num;
unsigned int rsvd[4];
};
/* Parent SQ WQE */
struct hifc_root_sge_s {
unsigned int buf_addr_hi;
unsigned int buf_addr_lo;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int ext_flag : 1;
unsigned int buf_len : 31;
#else
unsigned int buf_len : 31;
unsigned int ext_flag : 1;
#endif
} wd0;
struct {
unsigned int rsvd;
} wd1;
};
/* Root SQ WQE Task Section structure for FC */
struct hifc_root_sqe_task_section_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int task_type : 8;
/* 1:offload enable,0:offload disable. */
unsigned int off_load : 1;
unsigned int port_id : 4;
unsigned int host_id : 2;
unsigned int rsvd1 : 1;
unsigned int exch_id : 16;
#else
unsigned int exch_id : 16;
unsigned int rsvd1 : 1;
unsigned int host_id : 2;
unsigned int port_id : 4;
unsigned int off_load : 1;
unsigned int task_type : 8;
#endif
} fc_dw0;
union {
unsigned int context_gpa_hi;
unsigned int magic_num;
} fc_dw1;
struct {
unsigned int context_gpa_lo;
} fc_dw2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short scq_num; /* SCQ num */
unsigned short rport_index; /* RPort */
#else
unsigned short rport_index; /* RPort */
unsigned short scq_num; /* SCQ num */
#endif
} fc_dw3;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */
unsigned int pkt_cos : 3;
unsigned int rsvd2 : 1;
unsigned int csize : 2;
unsigned int service_type : 5;
unsigned int parent_xid : 20;
#else
unsigned int parent_xid : 20;
unsigned int service_type : 5;
unsigned int csize : 2;
unsigned int rsvd2 : 1;
unsigned int pkt_cos : 3; /* pkt cos,4:ETH, 0:FC */
unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */
#endif
} fc_dw4;
struct {
unsigned int rsvd;
} fc_dw5;
};
/* Root SQ WQE */
struct hifc_root_sqe_s {
/* Control Section */
struct hifcoe_wqe_ctrl_s ctrl_section;
struct hifc_root_sqe_task_section_s task_section;
struct hifc_root_sge_s sge;
struct hifc_root_sge_s ctx_sge;
};
/* Parent SQ WQE and Root SQ WQE Related function */
void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned int ts_size,
unsigned int bdsl);
void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts,
unsigned int rport_index,
unsigned short local_xid,
unsigned short remote_xid,
unsigned short data_len);
void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr,
unsigned long long v_phyaddr,
unsigned int buf_len,
unsigned int xid, void *v_hba);
void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
void *v_sq_info, unsigned short cmnd,
unsigned int v_scqn, void *v_frame_pld);
void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
void *v_frame_pld, unsigned short type,
unsigned short cmnd, unsigned int v_scqn);
void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe,
unsigned short els_cmnd_type,
unsigned int v_magic_num);
void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int magic_num);
void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int abts_param,
unsigned int magic_num);
void hifc_build_service_wqe_root_ts(void *v_hba,
struct hifc_root_sqe_s *v_rt_sqe,
unsigned int rx_id, unsigned int rport_id,
unsigned int scq_num);
void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe,
void *v_buf_addr,
unsigned long long v_phyaddr,
unsigned int buf_len,
void *v_hba);
void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe,
dma_addr_t ctx_addr,
unsigned int xid);
void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned short owner,
unsigned short pmsn);
void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe);
void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe);
void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_ts_s *v_sqe_ts);
void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
unsigned char v_task_type,
unsigned short v_exi_base,
unsigned char v_port_idx);
void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe,
enum hifcoe_task_type_e task_type,
unsigned short rx_id);
void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, unsigned short owner,
unsigned short pmsn);
void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl,
unsigned char v_task_len);
void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe,
unsigned long long v_ctx_addr,
unsigned int buf_len);
void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe);
#endif

414
hifc/hifcoe_parent_context.h Normal file
View File

@ -0,0 +1,414 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFCOE_PARENT_CONTEXT_H__
#define __HIFCOE_PARENT_CONTEXT_H__
enum fc_parent_status_e {
FCOE_PARENT_STATUS_INVALID = 0,
FCOE_PARENT_STATUS_NORMAL,
FCOE_PARENT_STATUS_CLOSING
};
#define HIFCOE_DOUBLE_SGL (1)
#define HIFCOE_SINGLE_SGL (0)
#define HIFCOE_DIX_ALGORITHM_IP (1)
#define HIFCOE_DIX_ALGORITHM_CRC (0)
#define HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE (48)
#define HIFCOE_PARENT_CONTEXT_SRQ_QINFO_SIZE (8)
#define HIFCOE_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */
#define HIFCOE_RQ_FILLED_OFFSET \
((u8)(u32)& \
(((struct hifcoe_sw_section_s *)0x0)->occupy_by_rqe_filled_flag))
#define HIFCOE_RW_LOCK_AREA_OFFSET \
((u8)(u32)&\
(((struct hifcoe_sw_section_s *)0x0)->occupy_by_rw_lock_area))
/* "fqg_level_eventiq_info_s" should be care if MAX_EVENTIQ_LEVEL is larger
* than 4
*/
#define MAX_EVENTIQ_LEVEL 4
#define MAX_EVENTIQ_LEVEL_SHIFT 2
#define SP_FEATRUE_EDTR 0x1
#define SP_FEATRUE_SEQ_CNT 0x2
#define MAX_PKT_SIZE_PER_DISPATCH (FC_PARENT_P->per_xmit_data_size)
#define MAX_PKT_SIZE_PER_DISPATCH_DIF_4K \
(MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 12) << 3))
#define MAX_PKT_SIZE_PER_DISPATCH_DIF_512B \
(MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 9) << 3))
#define MAX_PKT_SIZE_PER_DISPATCH_DIF(shift) \
(MAX_PKT_SIZE_PER_DISPATCH +\
((u32)((MAX_PKT_SIZE_PER_DISPATCH >> 9) >> (shift)) << 3))
/* immidiate data DIF info definition in parent context */
struct immi_dif_info_s {
union {
u32 value;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 pdu_difx_cnt :8;
u32 sct_size :1;/* Sector size, 1: 4K; 0: 512 */
u32 dif_verify_type :2; /* verify type */
u32 dif_ins_rep_type:2; /* ins&rep type */
u32 io_1st_pdu :1;
/* Check blocks whose application tag contains
* 0xFFFF flag
*/
u32 difx_app_esc :1;
u32 difx_ref_esc :1;
/*
* Check blocks whose reference tag contains 0xFFFF flag
*/
u32 grd_ctrl :3; /* The DIF/DIX Guard control */
/* Bit 0: DIF/DIX guard verify algorithm control */
u32 grd_agm_ctrl :2;
/*
* Bit 1: DIF/DIX guard replace or insert algorithm
* control
*/
u32 grd_agm_ini_ctrl :3;
/* The DIF/DIX Reference tag control */
u32 ref_tag_ctrl :3;
/* Bit 0: scenario of the reference tag verify mode */
u32 ref_tag_mode :2;
/*
* Bit 1: scenario of the reference tag insert/replace
* mode
*/
/* 0: fixed; 1: increment */
u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */
#else
u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */
/* Bit 0: scenario of the reference tag verify mode */
u32 ref_tag_mode :2;
/*
* Bit 1: scenario of the reference tag insert/replace
* mode
*/
/* 0: fixed; 1: increment */
/* The DIF/DIX Reference tag control */
u32 ref_tag_ctrl :3;
u32 grd_agm_ini_ctrl :3;
/* Bit 0: DIF/DIX guard verify algorithm control */
u32 grd_agm_ctrl :2;
/*
* Bit 1: DIF/DIX guard replace or insert algorithm
* control
*/
u32 grd_ctrl :3; /* The DIF/DIX Guard control */
/*
* Check blocks whose reference tag contains 0xFFFF flag
*/
u32 difx_ref_esc :1;
/*
* Check blocks whose application tag contains 0xFFFF
* flag
*/
u32 difx_app_esc :1;
u32 io_1st_pdu :1;
u32 dif_ins_rep_type:2; /* ins&rep type */
u32 dif_verify_type :2; /* verify type */
u32 sct_size :1; /* Sector size, 1: 4K; 0: 512 */
u32 pdu_difx_cnt :8;
#endif
} info;
} dif_dw3;
union {
u32 value;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 difx_len :11; /* DIF/DIFX total length */
u32 difx_en :1; /* DIF/DIFX enable flag */
u32 rsv0 :4;
u32 dif_cnt :16;
#else
u32 dif_cnt :16;
u32 rsv0 :4;
u32 difx_en :1; /* DIF/DIFX enable flag */
u32 difx_len :11; /* DIF/DIFX total length */
#endif
} info;
} dif_other;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rep_app_tag :16;
u32 cmp_app_tag :16;
#else
u32 cmp_app_tag :16;
u32 rep_app_tag :16;
#endif
/*
 * The ref tag value used for verify/compare; replacing or inserting
 * the ref tag is not supported.
 */
u32 cmp_ref_tag;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 cmp_app_tag_msk :16;
u32 rsv1 :16;
#else
u32 rsv1 :16;
u32 cmp_app_tag_msk :16;
#endif
};
/* parent context SW section definition: SW(80B) */
struct hifcoe_sw_section_s {
/* RO fields */
u32 scq_num_rcv_cmd; /* scq number used for cmd receive */
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 xid; /* driver init */
#else
struct {
u32 xid :13;
u32 vport :7;
u32 csctrl :8;
u32 rsvd0 :4;
} sw_ctxt_vport_xid;
#endif
u32 cid; /* ucode init */
u16 conn_id;
u16 immi_rq_page_size;
u16 immi_taskid_min;
u16 immi_taskid_max;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 vlan_id : 16; /* Vlan ID */
/* phycial port to receive and transmit packet. */
u32 port_id : 4;
/*
 * new srq offset: ucode uses the new srq to receive els/gs frames with
 * big payloads.
 */
u32 rsvd1 : 5;
u32 srr_support : 2; /* sequence retransmition support flag */
u32 srv_type : 5;
#else
union {
u32 pctxt_val0;
struct {
u32 srv_type : 5; /* driver init */
/* sequence retransmition support flag */
u32 srr_support : 2;
u32 rsvd1 : 5;
u32 port_id : 4; /* driver init */
u32 vlan_id : 16; /* driver init */
} dw;
} sw_ctxt_misc;
#endif
u16 oqid_rd;
u16 oqid_wr;
u32 per_xmit_data_size;
/* RW fields */
u32 cmd_scq_gpa_h;
u32 cmd_scq_gpa_l;
/* E_D_TOV timer value: value should be set on ms by driver */
u32 e_d_tov_timer_val;
/*
 * mfs-unaligned bytes per 64KB dispatch; equal to
 * "MAX_PKT_SIZE_PER_DISPATCH % info->parent->tx_mfs"
 */
u16 mfs_unaligned_bytes;
u16 tx_mfs; /* remote port max receive fc payload length */
/* max data len allowed in xfer_rdy dis scenario*/
u32 xfer_rdy_dis_max_len_remote;
u32 xfer_rdy_dis_max_len_local;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* Double or single SGL, 1: double; 0: single */
u32 sgl_num :1;
u32 write_xfer_rdy :1; /* WRITE Xfer_Rdy disable or enable */
u32 rec_support :1; /* REC support flag */
u32 conf_support :1; /* Response confirm support flag */
u32 vlan_enable :1; /* Vlan enable flag */
u32 e_d_tov :1; /* E_D_TOV Resolution, 0: ms, 1: us*/
/* seq_cnt, 1: increment supported, 0: increment not supported */
u32 seq_cnt :1;
/* 0:Target, 1:Initiator, 2:Target&Initiator */
u32 work_mode :2;
/* used for parent-context cache consistency judgment, 1: done */
u32 flush_done :1;
u32 oq_cos_cmd :3; /* esch oq cos for cmd/xferrdy/rsp */
u32 oq_cos_data :3; /* esch oq cos for data */
u32 cos :3; /* doorbell cos value */
u32 status :8; /* status of flow*/
u32 rsvd4 :2;
u32 priority :3; /* vlan priority */
#else
union {
struct {
u32 priority : 3; /* vlan priority */
u32 rsvd4 : 2;
u32 status : 8; /* status of flow*/
u32 cos : 3; /* doorbell cos value */
u32 oq_cos_data : 3; /* esch oq cos for data */
/* esch oq cos for cmd/xferrdy/rsp */
u32 oq_cos_cmd : 3;
/*
* used for parent context cache Consistency judgment,
* 1: done
*/
u32 flush_done : 1;
/* 0:Target, 1:Initiator, 2:Target&Initiator */
u32 work_mode : 2;
u32 seq_cnt : 1; /* seq_cnt */
u32 e_d_tov : 1; /* E_D_TOV resolution */
u32 vlan_enable : 1; /* Vlan enable flag */
/* Response confirm support flag */
u32 conf_support : 1;
u32 rec_support : 1; /* REC support flag */
/* WRITE Xfer_Rdy disable or enable */
u32 write_xfer_rdy : 1;
/* Double or single SGL, 1: double; 0: single */
u32 sgl_num : 1;
} dw;
u32 pctxt_val1;
} sw_ctxt_config;
#endif
/* immidiate data dif control info(20B) */
struct immi_dif_info_s immi_dif_info;
};
struct hifcoe_hw_rsvd_queue_s {
/* bitmap[0]:255-192 */
/* bitmap[1]:191-128 */
/* bitmap[2]:127-64 */
/* bitmap[3]:63-0 */
u64 seq_id_bitmap[4];
struct {
u64 last_req_seq_id : 8;
u64 xid : 20;
u64 rsvd0 : 36;
} wd0;
};
struct hifcoe_sq_qinfo_s {
u64 rsvd_0 : 10;
/* 0: get pmsn from queue header; 1: get pmsn from ucode */
u64 pmsn_type : 1;
u64 rsvd_1 : 4;
u64 cur_wqe_o : 1; /* should be opposite from loop_o */
u64 rsvd_2 : 48;
u64 cur_sqe_gpa;
u64 pmsn_gpa; /* sq's queue header gpa */
u64 sqe_dmaattr_idx : 6;
u64 sq_so_ro : 2;
u64 rsvd_3 : 2;
u64 ring : 1; /* 0: link; 1: ring */
u64 loop_o : 1; /* init to be the first round o-bit */
u64 rsvd_4 : 4;
u64 zerocopy_dmaattr_idx : 6;
u64 zerocopy_so_ro : 2;
u64 parity : 8;
u64 rsvd_5 : 26;
u64 pcie_template : 6;
};
struct hifcoe_cq_qinfo_s {
u64 pcie_template_hi : 3;
u64 parity_2 : 1;
u64 cur_cqe_gpa : 60;
u64 pi : 15;
u64 pi_o : 1;
u64 ci : 15;
u64 ci_o : 1;
/* if init_mode = 2, MSI/MSI-X is used; otherwise the low 5 bits hold c_eqn */
u64 c_eqn_msi_x : 10;
u64 parity_1 : 1;
/* 0: get ci from queue header; 1: get ci from ucode */
u64 ci_type : 1;
u64 cq_depth : 3; /* valid when ring = 1 */
u64 armq : 1; /* 0: IDLE state; 1: NEXT state */
u64 cur_cqe_cnt : 8;
u64 cqe_max_cnt : 8;
u64 cqe_dmaattr_idx : 6;
u64 cq_so_ro : 2;
u64 init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */
u64 next_o : 1; /* next page valid o-bit */
u64 loop_o : 1; /* init to be the first round o-bit */
u64 next_cq_wqe_page_gpa : 52;
u64 pcie_template_lo : 3;
u64 parity_0 : 1;
u64 ci_gpa : 60; /* cq's queue header gpa */
};
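/*
 * Illustrative sketch (not part of this driver): pi_o/ci_o/loop_o
 * follow the usual owner-bit convention for a ring shared between
 * driver and ucode. A consumer treats an entry as valid only while
 * the entry's owner bit matches the expected round, and flips the
 * expectation on wrap, e.g. (names other than ci/ci_o hypothetical):
 *
 *	if (cqe_owner_bit == ci_o) {
 *		// entry belongs to the current round: consume it
 *		if (++ci == cq_entries) {
 *			ci = 0;
 *			ci_o = !ci_o; // next round expects flipped bit
 *		}
 *	}
 */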
struct hifcoe_scq_qinfo_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
union {
struct {
u64 parity : 6;
u64 rq_th2_preld_cache_num : 5;
u64 rq_th1_preld_cache_num : 5;
u64 rq_th0_preld_cache_num : 5;
u64 rq_min_preld_cache_num : 4;
u64 sq_th2_preld_cache_num : 5;
u64 sq_th1_preld_cache_num : 5;
u64 sq_th0_preld_cache_num : 5;
u64 sq_min_preld_cache_num : 4;
u64 scq_n : 20; /* scq number */
} info;
u64 pctxt_val1;
} hw_scqc_config;
#else
union {
struct {
u64 scq_n : 20; /* scq number */
u64 sq_min_preld_cache_num : 4;
u64 sq_th0_preld_cache_num : 5;
u64 sq_th1_preld_cache_num : 5;
u64 sq_th2_preld_cache_num : 5;
u64 rq_min_preld_cache_num : 4;
u64 rq_th0_preld_cache_num : 5;
u64 rq_th1_preld_cache_num : 5;
u64 rq_th2_preld_cache_num : 5;
u64 parity : 6;
} info;
u64 pctxt_val1;
} hw_scqc_config;
#endif
};
struct hifcoe_srq_qinfo_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u64 srqc_gpa : 60;
u64 parity : 4;
#else
u64 parity : 4;
u64 srqc_gpa : 60;
#endif
};
/* layout of the parent context for service types 12/13 */
struct hifcoe_parent_context_s {
u8 key[HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE];
struct hifcoe_scq_qinfo_s resp_scq_qinfo;
struct hifcoe_srq_qinfo_s imm_srq_info;
struct hifcoe_sq_qinfo_s sq_qinfo;
u8 timer_section[HIFCOE_PARENT_CONTEXT_TIMER_SIZE];
struct hifcoe_hw_rsvd_queue_s hw_rsvdq;
struct hifcoe_srq_qinfo_s els_srq_info;
struct hifcoe_sw_section_s sw_section;
};
#endif

1698
hifc/hifcoe_wqe.h Normal file

File diff suppressed because it is too large Load Diff

1893
hifc/unf_common.h Normal file

File diff suppressed because it is too large Load Diff

1320
hifc/unf_disc.c Normal file

File diff suppressed because it is too large Load Diff

53
hifc/unf_disc.h Normal file
View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_DISC_H__
#define __UNF_DISC_H__
#define UNF_DISC_RETRY_TIMES 3
#define UNF_DISC_NONE 0
#define UNF_DISC_FABRIC 1
#define UNF_DISC_LOOP 2
enum unf_disc_state_e {
UNF_DISC_ST_START = 0x3000,
UNF_DISC_ST_GIDPT_WAIT,
UNF_DISC_ST_GIDFT_WAIT,
UNF_DISC_ST_END
};
enum unf_disc_event_e {
UNF_EVENT_DISC_NORMAL_ENTER = 0x8000,
UNF_EVENT_DISC_FAILED = 0x8001,
UNF_EVENT_DISC_SUCCESS = 0x8002,
UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003,
UNF_EVENT_DISC_LINKDOWN = 0x8004
};
enum unf_disc_type_e {
UNF_DISC_GET_PORT_NAME = 0,
UNF_DISC_GET_NODE_NAME,
UNF_DISC_GET_FEATURE
};
struct unf_disc_gs_event_info {
void *lport;
void *rport;
unsigned int rport_id;
enum unf_disc_type_e entype;
struct list_head list_entry;
};
unsigned int unf_get_and_post_disc_event(void *v_lport,
void *v_sns_port,
unsigned int v_nport_id,
enum unf_disc_type_e v_en_type);
void unf_flush_disc_event(void *v_disc, void *v_vport);
void unf_disc_error_recovery(void *v_lport);
void unf_disc_mgr_destroy(void *v_lport);
void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd);
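/*
 * Usage sketch (illustrative only): a discovery-path caller that has
 * resolved an N_Port ID would typically queue a feature query with
 *
 *	ret = unf_get_and_post_disc_event(lport, sns_port, nport_id,
 *					  UNF_DISC_GET_FEATURE);
 *
 * where lport/sns_port/nport_id are assumed to come from the caller's
 * context; the posted event is then processed asynchronously.
 */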
#endif

556
hifc/unf_event.c Normal file
View File

@ -0,0 +1,556 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_event.h"
#include "unf_lport.h"
struct unf_event_list fc_event_list;
struct unf_global_event_queue global_event_queue;
/* Max number of global event nodes */
#define UNF_MAX_GLOBAL_ENENT_NODE 24
unsigned int unf_init_event_msg(struct unf_lport_s *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned int i;
unsigned long flags = 0;
UNF_CHECK_VALID(0x770, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
event_mgr = &v_lport->event_mgr;
/* Get and initialize event node resources */
event_mgr->pmem_add =
vmalloc((size_t)event_mgr->free_event_count *
sizeof(struct unf_cm_event_report));
if (!event_mgr->pmem_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate event manager failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(event_mgr->pmem_add, 0,
((size_t)event_mgr->free_event_count *
sizeof(struct unf_cm_event_report)));
event_node = (struct unf_cm_event_report *)(event_mgr->pmem_add);
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
for (i = 0; i < event_mgr->free_event_count; i++) {
INIT_LIST_HEAD(&event_node->list_entry);
list_add_tail(&event_node->list_entry,
&event_mgr->list_free_event);
event_node++;
}
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return RETURN_OK;
}
static void unf_del_eventcenter(struct unf_lport_s *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
UNF_CHECK_VALID(0x771, UNF_TRUE, v_lport, return);
event_mgr = &v_lport->event_mgr;
event_mgr->pfn_unf_get_free_event = NULL;
event_mgr->pfn_unf_release_event = NULL;
event_mgr->pfn_unf_post_event = NULL;
}
void unf_init_event_node(struct unf_cm_event_report *v_event_node)
{
UNF_CHECK_VALID(0x776, UNF_TRUE, v_event_node, return);
v_event_node->event = UNF_EVENT_TYPE_REQUIRE;
v_event_node->event_asy_flag = UNF_EVENT_ASYN;
v_event_node->delay_times = 0;
v_event_node->para_in = NULL;
v_event_node->para_out = NULL;
v_event_node->result = 0;
v_event_node->lport = NULL;
v_event_node->pfn_unf_event_task = NULL;
v_event_node->pfn_unf_event_recovery_strategy = NULL;
v_event_node->pfn_unf_event_alarm_strategy = NULL;
}
struct unf_cm_event_report *unf_get_free_event_node(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_cm_event_report *event_node = NULL;
struct list_head *list_node = NULL;
struct unf_lport_s *root_lport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x777, UNF_TRUE, v_lport, return NULL);
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
if (unlikely(atomic_read(&root_lport->port_no_operater_flag) ==
UNF_LPORT_NOP))
return NULL;
/* Get EventMgr from Lport */
event_mgr = &root_lport->event_mgr;
/* Get free node from free pool */
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
if (list_empty(&event_mgr->list_free_event)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) have no event node anymore",
root_lport->port_id);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return NULL;
}
list_node = (&event_mgr->list_free_event)->next;
list_del(list_node);
event_mgr->free_event_count--;
event_node = list_entry(list_node, struct unf_cm_event_report,
list_entry);
/* Initialize event node */
unf_init_event_node(event_node);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return event_node;
}
void unf_check_event_mgr_status(struct unf_event_mgr *v_event_mgr)
{
unsigned long flag = 0;
UNF_CHECK_VALID(0x773, UNF_TRUE, v_event_mgr, return);
spin_lock_irqsave(&v_event_mgr->port_event_lock, flag);
if ((v_event_mgr->emg_completion) &&
(v_event_mgr->free_event_count == UNF_MAX_EVENT_NODE)) {
complete(v_event_mgr->emg_completion);
}
spin_unlock_irqrestore(&v_event_mgr->port_event_lock, flag);
}
void unf_release_event(void *v_lport, void *v_event_node)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_lport_s *root_lport = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x778, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x779, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
event_mgr = &root_lport->event_mgr;
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
event_mgr->free_event_count++;
unf_init_event_node(event_node);
list_add_tail(&event_node->list_entry, &event_mgr->list_free_event);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
unf_check_event_mgr_status(event_mgr);
}
void unf_post_event(void *v_lport, void *v_event_node)
{
struct unf_cm_event_report *event_node = NULL;
struct unf_chip_manage_info_s *card_thread_info = NULL;
struct unf_lport_s *root_lport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x780, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
UNF_REFERNCE_VAR(v_lport);
/* If null, post to global event center */
if (!v_lport) {
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags);
fc_event_list.list_num++;
list_add_tail(&event_node->list_entry,
&fc_event_list.list_head);
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
flags);
wake_up_process(event_thread);
} else {
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
card_thread_info = root_lport->chip_info;
/* Post to global event center */
if (!card_thread_info) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT,
UNF_WARN,
"[warn]Port(0x%x) has strange event with type(0x%x)",
root_lport->nport_id, event_node->event);
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock,
flags);
fc_event_list.list_num++;
list_add_tail(&event_node->list_entry,
&fc_event_list.list_head);
spin_unlock_irqrestore(
&fc_event_list.fc_eventlist_lock,
flags);
wake_up_process(event_thread);
} else {
spin_lock_irqsave(
&card_thread_info->chip_event_list_lock,
flags);
card_thread_info->list_num++;
list_add_tail(&event_node->list_entry,
&card_thread_info->list_head);
spin_unlock_irqrestore(
&card_thread_info->chip_event_list_lock,
flags);
wake_up_process(card_thread_info->data_thread);
}
}
}
unsigned int unf_init_event_center(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
unsigned int ret = RETURN_OK;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x772, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
/* Initialize event manager */
event_mgr = &lport->event_mgr;
event_mgr->free_event_count = UNF_MAX_EVENT_NODE;
event_mgr->pfn_unf_get_free_event = unf_get_free_event_node;
event_mgr->pfn_unf_release_event = unf_release_event;
event_mgr->pfn_unf_post_event = unf_post_event;
INIT_LIST_HEAD(&event_mgr->list_free_event);
spin_lock_init(&event_mgr->port_event_lock);
event_mgr->emg_completion = NULL;
ret = unf_init_event_msg(lport);
return ret;
}
void unf_wait_event_mgr_complete(struct unf_event_mgr *v_event_mgr)
{
struct unf_event_mgr *event_mgr = NULL;
int wait = UNF_FALSE;
unsigned long mg_flag = 0;
struct completion fc_event_completion =
COMPLETION_INITIALIZER(fc_event_completion);
UNF_CHECK_VALID(0x774, UNF_TRUE, v_event_mgr, return);
event_mgr = v_event_mgr;
spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag);
if (event_mgr->free_event_count != UNF_MAX_EVENT_NODE) {
event_mgr->emg_completion = &fc_event_completion;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag);
if (wait == UNF_TRUE)
wait_for_completion(event_mgr->emg_completion);
spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag);
event_mgr->emg_completion = NULL;
spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag);
}
unsigned int unf_event_center_destroy(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned int ret = RETURN_OK;
unsigned long flag = 0;
unsigned long list_lock_flag = 0;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x775, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
event_mgr = &lport->event_mgr;
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, list_lock_flag);
if (!list_empty(&fc_event_list.list_head)) {
list_for_each_safe(list, list_tmp, &fc_event_list.list_head) {
event_node = list_entry(list,
struct unf_cm_event_report,
list_entry);
if (lport == event_node->lport) {
list_del_init(&event_node->list_entry);
if (event_node->event_asy_flag ==
UNF_EVENT_SYN) {
event_node->result = UNF_RETURN_ERROR;
complete(&event_node->event_comp);
}
spin_lock_irqsave(&event_mgr->port_event_lock,
flag);
event_mgr->free_event_count++;
list_add_tail(&event_node->list_entry,
&event_mgr->list_free_event);
spin_unlock_irqrestore(
&event_mgr->port_event_lock, flag);
}
}
}
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
list_lock_flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait event", lport->port_id);
unf_wait_event_mgr_complete(event_mgr);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait event process end", lport->port_id);
unf_del_eventcenter(lport);
vfree(event_mgr->pmem_add);
event_mgr->pmem_add = NULL;
lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER;
return ret;
}
static void unf_procee_asyn_event(struct unf_cm_event_report *v_event_node)
{
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = (struct unf_lport_s *)v_event_node->lport;
UNF_CHECK_VALID(0x782, UNF_TRUE, lport, return);
if (v_event_node->pfn_unf_event_task)
ret = (unsigned int)
v_event_node->pfn_unf_event_task(v_event_node->para_in,
v_event_node->para_out);
if (lport->event_mgr.pfn_unf_release_event)
lport->event_mgr.pfn_unf_release_event(lport, v_event_node);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN,
"[warn]Port(0x%x) handle event(0x%x) failed",
lport->port_id, v_event_node->event);
}
UNF_REFERNCE_VAR(ret);
}
void unf_release_global_event(void *v_event_node)
{
unsigned long flag = 0;
struct unf_cm_event_report *event_node = NULL;
UNF_CHECK_VALID(0x784, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
unf_init_event_node(event_node);
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
global_event_queue.list_number++;
list_add_tail(&event_node->list_entry,
&global_event_queue.global_eventlist);
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
}
void unf_handle_event(struct unf_cm_event_report *v_event_node)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int event = 0;
unsigned int event_asy_flag = UNF_EVENT_ASYN;
UNF_CHECK_VALID(0x781, UNF_TRUE, v_event_node, return);
UNF_REFERNCE_VAR(ret);
UNF_REFERNCE_VAR(event);
event = v_event_node->event;
event_asy_flag = v_event_node->event_asy_flag;
switch (event_asy_flag) {
case UNF_EVENT_SYN: /* synchronous event node */
case UNF_GLOBAL_EVENT_SYN:
if (v_event_node->pfn_unf_event_task) {
ret = (unsigned int)v_event_node->pfn_unf_event_task(
v_event_node->para_in,
v_event_node->para_out);
}
v_event_node->result = ret;
complete(&v_event_node->event_comp);
break;
case UNF_EVENT_ASYN: /* asynchronous event node */
unf_procee_asyn_event(v_event_node);
break;
case UNF_GLOBAL_EVENT_ASYN:
if (v_event_node->pfn_unf_event_task) {
ret = (unsigned int)v_event_node->pfn_unf_event_task(
v_event_node->para_in,
v_event_node->para_out);
}
unf_release_global_event(v_event_node);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_EVENT, UNF_WARN,
"[warn]handle global event(0x%x) failed",
event);
}
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN,
"[warn]Unknown event(0x%x)", event);
break;
}
}
unsigned int unf_init_global_event_msg(void)
{
struct unf_cm_event_report *event_node = NULL;
unsigned int ret = RETURN_OK;
unsigned int i = 0;
unsigned long flag = 0;
INIT_LIST_HEAD(&global_event_queue.global_eventlist);
spin_lock_init(&global_event_queue.global_eventlist_lock);
global_event_queue.list_number = 0;
global_event_queue.global_event_add =
vmalloc(UNF_MAX_GLOBAL_ENENT_NODE *
sizeof(struct unf_cm_event_report));
if (!global_event_queue.global_event_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Can't allocate global event queue");
return UNF_RETURN_ERROR;
}
memset(global_event_queue.global_event_add, 0,
(sizeof(struct unf_cm_event_report) *
UNF_MAX_GLOBAL_ENENT_NODE));
event_node = (struct unf_cm_event_report *)
(global_event_queue.global_event_add);
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
for (i = 0; i < UNF_MAX_GLOBAL_ENENT_NODE; i++) {
INIT_LIST_HEAD(&event_node->list_entry);
list_add_tail(&event_node->list_entry,
&global_event_queue.global_eventlist);
global_event_queue.list_number++;
event_node++;
}
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
return ret;
}
void unf_destroy_global_event_msg(void)
{
if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_CRITICAL,
"[warn]Global event release not complete with remain nodes(0x%x)",
global_event_queue.list_number);
}
vfree(global_event_queue.global_event_add);
}
unsigned int unf_schedule_global_event(
void *v_para,
unsigned int v_event_asy_flag,
int (*pfn_unf_event_task)(void *v_argin, void *v_argout))
{
struct list_head *list_node = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x783, UNF_TRUE, pfn_unf_event_task,
return UNF_RETURN_ERROR);
if ((v_event_asy_flag != UNF_GLOBAL_EVENT_ASYN) &&
(v_event_asy_flag != UNF_GLOBAL_EVENT_SYN)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Event async flag(0x%x) abnormity",
v_event_asy_flag);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
if (list_empty(&global_event_queue.global_eventlist)) {
spin_unlock_irqrestore(
&global_event_queue.global_eventlist_lock, flag);
return UNF_RETURN_ERROR;
}
list_node = (&global_event_queue.global_eventlist)->next;
list_del_init(list_node);
global_event_queue.list_number--;
event_node = list_entry(list_node, struct unf_cm_event_report,
list_entry);
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
/* Initialize global event */
unf_init_event_node(event_node);
init_completion(&event_node->event_comp);
event_node->event_asy_flag = v_event_asy_flag;
event_node->pfn_unf_event_task = pfn_unf_event_task;
event_node->para_in = (void *)v_para;
event_node->para_out = NULL;
unf_post_event(NULL, event_node);
if (v_event_asy_flag == UNF_GLOBAL_EVENT_SYN) {
/* must wait for complete */
wait_for_completion(&event_node->event_comp);
ret = event_node->result;
unf_release_global_event(event_node);
} else {
ret = RETURN_OK;
}
return ret;
}
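/*
 * Usage sketch (illustrative only, hypothetical callback/ctx names):
 * a caller that must run a task on the global event thread and wait
 * for the result uses the synchronous flavour:
 *
 *	static int my_chip_task(void *v_argin, void *v_argout)
 *	{
 *		return RETURN_OK;
 *	}
 *	...
 *	ret = unf_schedule_global_event((void *)ctx,
 *					UNF_GLOBAL_EVENT_SYN,
 *					my_chip_task);
 *
 * With UNF_GLOBAL_EVENT_SYN the call blocks on event_comp and returns
 * the callback's result; with UNF_GLOBAL_EVENT_ASYN it returns
 * RETURN_OK at once and the node is recycled by
 * unf_release_global_event() after the callback has run.
 */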
struct unf_cm_event_report *unf_get_one_event_node(void *v_lport)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x785, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x786, UNF_TRUE,
lport->event_mgr.pfn_unf_get_free_event,
return NULL);
return lport->event_mgr.pfn_unf_get_free_event((void *)lport);
}
void unf_post_one_event_node(void *v_lport,
struct unf_cm_event_report *v_event)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x787, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x788, UNF_TRUE, v_event, return);
UNF_CHECK_VALID(0x789, UNF_TRUE, lport->event_mgr.pfn_unf_post_event,
return);
UNF_CHECK_VALID(0x790, UNF_TRUE, v_event, return);
lport->event_mgr.pfn_unf_post_event((void *)lport, v_event);
}
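/*
 * Usage sketch (illustrative only, hypothetical my_task/ctx): the
 * per-port event path pairs the two wrappers above. A caller borrows
 * a node, fills it in and posts it:
 *
 *	struct unf_cm_event_report *event;
 *
 *	event = unf_get_one_event_node(lport);
 *	if (event) {
 *		event->lport = lport;
 *		event->event_asy_flag = UNF_EVENT_ASYN;
 *		event->pfn_unf_event_task = my_task;
 *		event->para_in = (void *)ctx;
 *		unf_post_one_event_node(lport, event);
 *	}
 *
 * For UNF_EVENT_ASYN the node is returned to the free pool by
 * unf_procee_asyn_event() once the task has run.
 */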

101
hifc/unf_event.h Normal file
View File

@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_EVENT_H__
#define __UNF_EVENT_H__
#include "hifc_knl_adp.h"
enum unf_poll_flag {
UNF_POLL_CHIPERROR_FLAG = 0, /* CHIP ERROR POLL */
UNF_POLL_ERROR_CODE, /* CODE ERROR POLL */
UNF_POLL_SFP_FLAG, /* SFP POLL */
UNF_POLL_BUTT
};
#define UNF_MAX_EVENT_NODE 256
enum unf_event_type {
UNF_EVENT_TYPE_ALARM = 0, /* Alarm */
UNF_EVENT_TYPE_REQUIRE, /* Require */
UNF_EVENT_TYPE_RECOVERY, /* Recovery */
UNF_EVENT_TYPE_BUTT
};
struct unf_cm_event_report {
/* event type */
unsigned int event;
/* sync/async flag */
unsigned int event_asy_flag;
/* delay times; only valid for async events */
unsigned int delay_times;
struct list_head list_entry;
void *lport;
/* parameter */
void *para_in;
void *para_out;
unsigned int result;
/* event task handler */
int (*pfn_unf_event_task)(void *v_argin, void *v_argout);
/* recovery strategy */
int (*pfn_unf_event_recovery_strategy)(void *);
/* alarm strategy */
int (*pfn_unf_event_alarm_strategy)(void *);
struct completion event_comp;
};
struct unf_event_mgr {
spinlock_t port_event_lock;
unsigned int free_event_count;
struct list_head list_free_event;
struct completion *emg_completion;
void *pmem_add;
struct unf_cm_event_report *(*pfn_unf_get_free_event)(void *v_lport);
void (*pfn_unf_release_event)(void *v_lport, void *v_event_node);
void (*pfn_unf_post_event)(void *v_lport, void *v_event_node);
};
struct unf_global_event_queue {
void *global_event_add;
unsigned int list_number;
struct list_head global_eventlist;
spinlock_t global_eventlist_lock;
};
struct unf_event_list {
struct list_head list_head;
spinlock_t fc_eventlist_lock;
unsigned int list_num; /* list node number */
};
void unf_handle_event(struct unf_cm_event_report *v_event_node);
unsigned int unf_init_global_event_msg(void);
void unf_destroy_global_event_msg(void);
unsigned int unf_schedule_global_event(
void *v_para,
unsigned int v_event_asy_flag,
int (*pfn_unf_event_task)(void *v_argin, void *v_argout));
struct unf_cm_event_report *unf_get_one_event_node(void *v_lport);
void unf_post_one_event_node(void *v_lport,
struct unf_cm_event_report *v_event);
unsigned int unf_event_center_destroy(void *v_lport);
unsigned int unf_init_event_center(void *v_lport);
extern struct task_struct *event_thread;
extern struct unf_global_event_queue global_event_queue;
extern struct unf_event_list fc_event_list;
#endif

3632
hifc/unf_exchg.c Normal file

File diff suppressed because it is too large Load Diff

512
hifc/unf_exchg.h Normal file
View File

@ -0,0 +1,512 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_FCEXCH_H__
#define __UNF_FCEXCH_H__
#include "unf_scsi_common.h"
#include "unf_lport.h"
#define DRV_VERIFY_CRC_MASK (1 << 1)
#define DRV_VERIFY_APP_MASK (1 << 2)
#define DRV_VERIFY_LBA_MASK (1 << 3)
#define DRV_DIF_CRC_POS 0
#define DRV_DIF_CRC_LEN 2
#define DRV_DIF_APP_POS 2
#define DRV_DIF_APP_LEN 2
#define DRV_DIF_LBA_POS 4
#define DRV_DIF_LBA_LEN 4
enum unf_ioflow_id_e {
XCHG_ALLOC = 0,
TGT_RECEIVE_ABTS,
TGT_ABTS_DONE,
TGT_IO_SRR,
SFS_RESPONSE,
SFS_TIMEOUT,
INI_SEND_CMND,
INI_RESPONSE_DONE,
INI_EH_ABORT,
INI_EH_DEVICE_RESET,
INI_EH_BLS_DONE,
INI_IO_TIMEOUT,
INI_REQ_TIMEOUT,
XCHG_CANCEL_TIMER,
XCHG_FREE_XCHG,
SEND_ELS,
IO_XCHG_WAIT,
XCHG_BUTT
};
enum unf_xchg_type_e {
UNF_XCHG_TYPE_INI = 0, /* INI IO */
UNF_XCHG_TYPE_SFS = 1, /* SFS IO */
UNF_XCHG_TYPE_INVALID
};
enum unf_xchg_mgr_type_e {
UNF_XCHG_MGR_TYPE_RANDOM = 0,
UNF_XCHG_MGR_TYPE_FIXED = 1,
UNF_XCHG_MGR_TYPE_INVALID
};
enum tgt_io_xchg_send_stage_e {
TGT_IO_SEND_STAGE_NONE = 0,
TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */
TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */
TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */
TGT_IO_SEND_STAGE_INVALID
};
enum tgt_io_send_result_e {
TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */
TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */
TGT_IO_SEND_RESULT_INVALID
};
struct unf_ioflow_id_s {
char *stage;
};
#define UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg) \
((v_oxid == xchg->ox_id) && (v_oid == xchg->oid) && \
(atomic_read(&xchg->ref_cnt) > 0))
#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \
xchg_alloc_time) \
do { \
if (unlikely((pkg_alloc_time != 0) && \
(pkg_alloc_time != xchg_alloc_time))) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, \
UNF_ERR, \
"Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not equal,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \
lport->port_id, lport->nport_id, \
xchg_tag, exchg, \
pkg_alloc_time, xchg_alloc_time); \
return UNF_RETURN_ERROR; \
} \
if (unlikely(pkg_alloc_time == 0)) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, \
UNF_MAJOR, \
"Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \
lport->port_id, lport->nport_id, \
xchg_tag, exchg, \
pkg_alloc_time, xchg_alloc_time); \
} \
} while (0)
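/*
 * Usage sketch (illustrative only): because the macro body contains a
 * bare "return UNF_RETURN_ERROR", it can only be used in functions
 * returning unsigned int, e.g. at the top of an RX handler:
 *
 *	UNF_CHECK_ALLOCTIME_VALID(lport, hot_pool_tag, xchg,
 *				  pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME],
 *				  xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]);
 *
 * The PKG_PRIVATE_XCHG_ALLOC_TIME index is an assumption based on the
 * private[] arrays used elsewhere; the check rejects a package whose
 * allocation timestamp no longer matches the exchange it points at.
 */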
#define UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
do { \
if (DRV_VERIFY_CRC_MASK & \
v_xchg->dif_control.protect_opcode) { \
if (memcmp(&dif_control->actual_dif[DRV_DIF_CRC_POS], \
&dif_control->expected_dif[DRV_DIF_CRC_POS], \
DRV_DIF_CRC_LEN) != 0) { \
tgt_err_code = default_err_code; \
} \
} \
} while (0)
#define UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
do { \
if ((check_err_code == tgt_err_code) && \
(DRV_VERIFY_LBA_MASK & v_xchg->dif_control.protect_opcode)) { \
if (memcmp(&dif_control->actual_dif[DRV_DIF_LBA_POS], \
&dif_control->expected_dif[DRV_DIF_LBA_POS], \
DRV_DIF_LBA_LEN) != 0) { \
tgt_err_code = default_err_code; \
} \
} \
} while (0)
#define UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code)
#define UNF_SET_SCSI_CMND_RESULT(v_xchg, v_result) \
((v_xchg)->scsi_cmnd_info.result = (v_result))
#define UNF_GET_GS_SFS_XCHG_TIMER(v_lport) (3 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_GET_BLS_SFS_XCHG_TIMER(v_lport) (2 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) (2 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_XCHG_MGR_FC 0
#define UNF_XCHG_MIN_XID 0x0000
#define UNF_XCHG_MAX_XID 0xffff
#define UNF_ELS_ECHO_RESULT_OK 0
#define UNF_ELS_ECHO_RESULT_FAIL 1
struct unf_xchg_s;
/* Xchg hot pool, busy IO lookup Xchg */
struct unf_xchg_hot_pool_s {
/* Xchg sum, in hot pool */
unsigned short total_xchges;
/* Total number of resources consumed, corresponding to buffers */
unsigned int total_res_cnt;
enum int_e wait_state;
/* pool lock */
spinlock_t xchg_hot_pool_lock;
/* Xchg position list */
struct list_head sfs_busylist;
struct list_head ini_busylist;
struct list_head list_destroy_xchg;
/* Next free hot point */
unsigned short slab_next_index;
unsigned short slab_total_sum;
unsigned short base;
struct unf_lport_s *lport;
struct unf_xchg_s *xchg_slab[0];
};
/* FREE POOL of Xchg */
struct unf_xchg_free_pool_s {
spinlock_t xchg_free_pool_lock;
unsigned int fcp_xchg_sum;
/* IO used Xchg */
struct list_head list_free_xchg_list;
unsigned int total_fcp_xchg;
/* SFS used Xchg */
struct list_head list_sfs_xchg_list;
unsigned int total_sfs_xchg;
unsigned int sfs_xchg_sum;
struct completion *xchg_mgr_completion;
};
struct unf_big_sfs_s {
struct list_head entry_big_sfs;
void *vaddr;
unsigned int size;
};
struct unf_big_sfs_pool_s {
void *big_sfs_pool;
unsigned int free_count;
struct list_head list_free_pool;
struct list_head list_busy_pool;
spinlock_t big_sfs_pool_lock;
};
/* Xchg Manager for vport Xchg */
struct unf_xchg_mgr_s {
/* MG type */
unsigned int mgr_type;
/* MG entry */
struct list_head xchg_mgr_entry;
/* MG attributes */
unsigned short min_xid;
unsigned short max_xid;
unsigned int mem_size;
/* MG allocated resources */
void *fcp_mm_start;
unsigned int sfs_mem_size;
void *sfs_mm_start;
dma_addr_t sfs_phy_addr;
struct unf_xchg_free_pool_s free_pool;
struct unf_xchg_hot_pool_s *hot_pool;
struct unf_big_sfs_pool_s st_big_sfs_pool;
struct buf_describe_s big_sfs_buf_list;
struct buf_describe_s rsp_buf_list;
};
struct unf_seq_s {
/* Seq ID */
unsigned char seq_id;
/* Seq Cnt */
unsigned short seq_cnt;
/* Seq state and length, may be used for FCoE */
unsigned short seq_stat;
unsigned int rec_data_len;
};
union unf_xchg_fcp_sfs_u {
struct unf_sfs_entry_s sfs_entry;
struct unf_fcp_rsp_iu_entry_s fcp_rsp_entry;
};
#define UNF_IO_STATE_NEW 0
#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */
#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */
#define TGT_IO_STATE_ABORT (1 << 7)
/* INI Upper-layer Task Management Commands */
#define INI_IO_STATE_UPTASK (1 << 15)
/* INI Upper-layer timeout Abort flag */
#define INI_IO_STATE_UPABORT (1 << 16)
#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */
#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */
#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */
#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */
/* INI only clear firmware resource flag */
#define INI_IO_STATE_ABORT_RESOURCE (1 << 21)
/* IOC abort: INI sends ABTS, 5s semaphore timeout, then set to 1 */
#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22)
#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */
/* INI busy IO session logo status */
#define INI_IO_STATE_LOGO (1 << 24)
#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */
#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */
#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */
#define TMF_RESPONSE_RECEIVED (1 << 0)
#define MARKER_STS_RECEIVED (1 << 1)
#define ABTS_RESPONSE_RECEIVED (1 << 2)
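/*
 * Illustrative sketch (not part of this driver): io_state, tmf_state
 * and abts_state are shared between contexts, so they are always
 * manipulated under xchg_state_lock, following the pattern used
 * throughout this series:
 *
 *	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
 *	xchg->io_state |= INI_IO_STATE_UPABORT;
 *	spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
 */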
struct unf_scsi_cmd_info_s {
unsigned long time_out;
unsigned long abort_timeout;
void *scsi_cmnd;
void (*pfn_done)(struct unf_scsi_cmd_s *);
ini_get_sgl_entry_buf pfn_unf_get_sgl_entry_buf;
struct unf_ini_error_code_s *err_code_table; /* error code table */
char *sense_buf;
unsigned int err_code_table_cout; /* Size of the error code table */
unsigned int buf_len;
unsigned int entry_cnt;
unsigned int result; /* Stores command execution results */
unsigned int port_id;
/* Re-search for rport based on scsiid during retry. Otherwise,
* data inconsistency will occur
*/
unsigned int scsi_id;
void *sgl;
};
struct unf_req_sgl_info_s {
void *sgl;
void *sgl_start;
unsigned int req_index;
unsigned int entry_index;
};
struct unf_els_echo_info_s {
unsigned long long response_time;
struct semaphore echo_sync_sema;
unsigned int echo_result;
};
struct unf_xchg_s {
/* Mgr resource related */
/* list delete from HotPool */
struct unf_xchg_hot_pool_s *hot_pool;
/* attach to FreePool */
struct unf_xchg_free_pool_s *free_pool;
struct unf_xchg_mgr_s *xchg_mgr;
struct unf_lport_s *lport; /* Local LPort/VLPort */
struct unf_rport_s *rport; /* Remote Port */
struct unf_rport_s *disc_rport; /* Discovery Remote Port */
struct list_head list_xchg_entry;
struct list_head list_abort_xchg_entry;
spinlock_t xchg_state_lock;
/* Xchg reference */
atomic_t ref_cnt;
atomic_t esgl_cnt;
int debug_hook;
/* Xchg attribution */
unsigned short hot_pool_tag; /* Hot pool tag */
/* Only used for abort; ox_id is set to 0xffff for
 * LUN reset/LOGO/PLOGI/link down
 */
unsigned short abort_oxid;
unsigned int xchg_type; /* LS,TGT CMND ,REQ,or SCSI Cmnd */
unsigned short ox_id;
unsigned short rx_id;
unsigned int sid;
unsigned int did;
unsigned int oid; /* ID of the exchange initiator */
unsigned int disc_port_id; /* Send GNN_ID/GFF_ID NPortId */
unsigned char seq_id;
unsigned char byte_orders; /* Byte order */
struct unf_seq_s seq;
unsigned int cmnd_code;
unsigned int world_id;
/* Dif control */
struct unf_dif_control_info_s dif_control;
struct dif_info_s dif_info;
/* IO status Abort,timer out */
unsigned int io_state; /* TGT_IO_STATE_E */
unsigned int tmf_state; /* TMF STATE */
unsigned int ucode_abts_state;
unsigned int abts_state;
/* IO Enqueuing */
enum tgt_io_xchg_send_stage_e io_send_stage; /* TGT_IO_SEND_STAGE_E */
/* IO Enqueuing result, success or failure */
enum tgt_io_send_result_e io_send_result; /* TGT_IO_SEND_RESULT_E */
/* Whether ABORT is delivered to the chip for IO */
unsigned char io_send_abort;
/* Result of delivering ABORT to the chip
* (success: UNF_TRUE; failure: UNF_FALSE)
*/
unsigned char io_abort_result;
/* for INI, indicates the length of the data
* transmitted over the PCI link
*/
unsigned int data_len;
/* Residual length: >0 means underflow, <0 means overflow */
int resid_len;
/* +++++++++++++++++IO Special++++++++++++++++++++ */
/* point to tgt cmnd/req/scsi cmnd */
/* Fcp cmnd */
struct unf_fcp_cmnd_s fcp_cmnd;
struct unf_scsi_cmd_info_s scsi_cmnd_info;
struct unf_req_sgl_info_s req_sgl_info;
struct unf_req_sgl_info_s dif_sgl_info;
unsigned long long cmnd_sn;
/* timestamp */
unsigned long long start_jif;
unsigned long long alloc_jif;
unsigned long long io_front_jif;
/* I/O resources to be consumed, corresponding to buffers */
unsigned int may_consume_res_cnt;
/* Number of resources consumed by I/Os. The value is not zero
* only when it is sent to the chip
*/
unsigned int fact_consume_res_cnt;
/* scsi req info */
unsigned int data_direction;
struct unf_big_sfs_s *big_sfs_buf;
/* scsi cmnd sense_buffer pointer */
union unf_xchg_fcp_sfs_u fcp_sfs_union;
/* One exchange may use several External Sgls */
struct list_head list_esgls;
struct unf_els_echo_info_s echo_info;
/* +++++++++++++++++Task Special++++++++++++++++++++ */
struct semaphore task_sema;
/* for RRQ ,IO Xchg add to SFS Xchg */
void *io_xchg;
/* Xchg delay work */
struct delayed_work timeout_work;
/* send result callback */
void (*pfn_ob_callback)(struct unf_xchg_s *);
/* Response IO callback */
void (*pfn_callback)(void *v_lport,
void *v_rport,
void *v_xchg);
/* Xchg release function */
void (*pfn_free_xchg)(struct unf_xchg_s *);
/* +++++++++++++++++low level Special++++++++++++++++++++ */
unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE];
/* ABTS_RSP info */
struct unf_abts_rsps_s abts_rsps;
unsigned long long rport_bind_jifs;
/* sfs exchg ob callback status */
unsigned int ob_callback_sts;
unsigned int scsi_id;
atomic_t delay_flag;
void *upper_ct;
};
struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg);
void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport);
unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport);
unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport);
void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport);
void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport);
unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage);
void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage);
struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport,
unsigned int);
struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport(
struct unf_lport_s *v_lport, unsigned int);
void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag);
struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn(
void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id);
void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_oxid,
unsigned int v_oid);
void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg, int v_abort_all_lun_flag);
void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid,
unsigned int v_did,
unsigned int extra_io_stat);
void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid,
unsigned int v_did);
void unf_cm_free_xchg(void *v_lport, void *v_xchg);
void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type);
void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag);
void unf_release_esgls(struct unf_xchg_s *v_xchg);
void unf_show_all_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr);
void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only);
void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport);
void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport,
enum int_e v_wait_state);
void unf_free_lport_all_xchg(struct unf_lport_s *v_lport);
bool unf_busy_io_completed(struct unf_lport_s *v_lport);
#endif

564
hifc/unf_init.c Normal file
View File

@ -0,0 +1,564 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Fibre Channel Linux driver
* Copyright(c) 2018 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_event.h"
#include "unf_exchg.h"
#include "unf_portman.h"
#include "unf_rport.h"
#include "unf_service.h"
#include "unf_io.h"
#define RPORT_FEATURE_POOL_SIZE 4096
static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
static unsigned int unf_recv_tmf_marker_status(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
static unsigned int unf_recv_abts_mrker_status(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
static int unf_get_cfg_parms(char *v_section_name,
struct unf_cfg_item_s *v_cfg_parm,
unsigned int *v_cfg_value,
unsigned int v_item_num);
/* global variables */
unsigned int event_thread_exit;
struct task_struct *event_thread;
struct completion *fc_event_handle_thd_comp;
struct workqueue_struct *unf_work_queue;
struct unf_global_card_thread_s card_thread_mgr;
unsigned int unf_dbg_level = UNF_MAJOR;
unsigned int log_print_level = UNF_INFO;
unsigned int log_limted_times = UNF_LOGIN_ATT_PRINT_TIMES;
struct unf_cm_handle_op_s cm_low_levle_handle = {
.pfn_unf_alloc_local_port = unf_lport_create_and_init,
.pfn_unf_release_local_port = unf_release_local_port,
.pfn_unf_receive_els_pkg = unf_receive_els_pkg,
.pfn_unf_receive_gs_pkg = unf_receive_gs_pkg,
.pfn_unf_receive_bls_pkg = unf_receive_bls_pkg,
.pfn_unf_send_els_done = unf_send_els_done,
.pfn_unf_receive_ini_rsponse = unf_ini_scsi_completed,
.pfn_unf_get_cfg_parms = unf_get_cfg_parms,
.pfn_unf_receive_marker_status = unf_recv_tmf_marker_status,
.pfn_unf_receive_abts_marker_status = unf_recv_abts_mrker_status,
.pfn_unf_cm_get_sgl_entry = unf_ini_get_sgl_entry,
.pfn_unf_cm_get_dif_sgl_entry = unf_ini_get_dif_sgl_entry,
.pfn_unf_get_one_free_esgl_page = unf_cm_get_one_free_esgl_page,
.pfn_unf_fc_port_link_event = unf_fc_port_link_event,
.pfn_unf_ioctl_to_com_handler = unf_cmd_adm_handler,
};
static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
UNF_CHECK_VALID(0x1700, 1, v_lport, return NULL);
UNF_CHECK_VALID(0x1701, 1, v_fra_pkg, return NULL);
lport = (struct unf_lport_s *)v_lport;
xchg = (struct unf_xchg_s *)v_fra_pkg->xchg_contex;
return unf_get_one_free_esgl_page(lport, xchg); /* from esgl pool */
}
static int unf_get_cfg_parms(char *v_section_name,
struct unf_cfg_item_s *v_cfg_parm,
unsigned int *v_cfg_value,
unsigned int v_item_num)
{
/* Maximum length of a configuration item value,
* including the end character
*/
#define UNF_MAX_ITEM_VALUE_LEN (256)
unsigned int *value = NULL;
struct unf_cfg_item_s *cfg_parm = NULL;
unsigned int i = 0;
cfg_parm = v_cfg_parm;
value = v_cfg_value;
for (i = 0; i < v_item_num; i++) {
if (!cfg_parm || !value) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"[err]Config name or value is NULL");
return UNF_RETURN_ERROR;
}
if (strcmp("End", cfg_parm->name) == 0)
break;
if (strcmp("fw_path", cfg_parm->name) == 0) {
cfg_parm++;
value += UNF_MAX_ITEM_VALUE_LEN / sizeof(unsigned int);
continue;
}
*value = cfg_parm->default_value;
cfg_parm++;
value++;
}
return RETURN_OK;
}
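/*
 * Usage sketch (illustrative only): the parser above expects a table
 * of struct unf_cfg_item_s terminated by an "End" entry, with the
 * "fw_path" item reserving UNF_MAX_ITEM_VALUE_LEN bytes in the value
 * buffer. Assuming the item struct carries at least a name and a
 * default value, a caller-side table could look like:
 *
 *	struct unf_cfg_item_s cfg_items[] = {
 *		{ .name = "port_speed", .default_value = 0 }, // hypothetical
 *		{ .name = "End", .default_value = 0 },
 *	};
 *
 * Every item other than "fw_path" simply receives its default_value.
 */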
static unsigned int unf_recv_tmf_marker_status(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned short hot_pool_tag = 0;
UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
/* Find exchange which point to marker sts */
if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) tag function is NULL",
lport->port_id);
return UNF_RETURN_ERROR;
}
hot_pool_tag = (unsigned short)
(v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
xchg = (struct unf_xchg_s *)
(lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport,
hot_pool_tag));
if (!xchg) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed",
lport->port_id, lport->nport_id, hot_pool_tag);
return UNF_RETURN_ERROR;
}
/*
* NOTE: set exchange TMF state with MARKER_STS_RECEIVED
*
* About TMF state
* 1. STS received
* 2. Response received
* 3. Do check if necessary
*/
xchg->tmf_state |= MARKER_STS_RECEIVED;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Marker STS: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x), EXCH: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x)",
v_fra_pkg->frame_head.rctl_did & UNF_NPORTID_MASK,
v_fra_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK,
(unsigned short)(v_fra_pkg->frame_head.oxid_rxid >> 16),
(unsigned short)(v_fra_pkg->frame_head.oxid_rxid),
xchg->did,
xchg->sid,
xchg->ox_id,
xchg->rx_id);
return RETURN_OK;
}
static unsigned int unf_recv_abts_mrker_status(
void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned short hot_pool_tag = 0;
unsigned long flags = 0;
UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
/* Find exchange by tag */
if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) tag function is NULL",
lport->port_id);
return UNF_RETURN_ERROR;
}
hot_pool_tag = (unsigned short)
(v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
xchg = (struct unf_xchg_s *)
(lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport,
hot_pool_tag));
if (!xchg) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed",
lport->port_id, lport->nport_id, hot_pool_tag);
return UNF_RETURN_ERROR;
}
/*
* NOTE: set exchange ABTS state with MARKER_STS_RECEIVED
*
* About exchange ABTS state
* 1. STS received
* 2. Response received
* 3. Do check if necessary
*
* About Exchange status get from low level
* 1. Set: when RCVD ABTS Marker
* 2. Set: when RCVD ABTS Req Done
* 3. value: set value with pkg->status
*/
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->ucode_abts_state = v_fra_pkg->status;
xchg->abts_state |= MARKER_STS_RECEIVED;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[info]Port(0x%x) wake up SEMA for Abts marker exchange(0x%p) oxid(0x%x 0x%x) status(0x%x)",
lport->port_id, xchg, xchg->ox_id, xchg->hot_pool_tag,
v_fra_pkg->abts_maker_status);
/*
* NOTE: Second time for ABTS marker received, or
* ABTS response have been received, no need to wake up sema
*/
if ((xchg->io_state & INI_IO_STATE_ABORT_TIMEOUT) ||
(xchg->abts_state & ABTS_RESPONSE_RECEIVED)) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_KEVENT,
"[info]Port(0x%x) no need to wake up SEMA for Abts marker ABTS_STATE(0x%x) IO_STATE(0x%x)",
lport->port_id, xchg->abts_state,
xchg->io_state);
return RETURN_OK;
}
if (xchg->io_state & INI_IO_STATE_TMF_ABORT) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_KEVENT,
"[info]Port(0x%x) receive Abts marker, exchange(%p) state(0x%x) free it",
lport->port_id, xchg, xchg->io_state);
unf_cm_free_xchg(lport, xchg);
} else {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
up(&xchg->task_sema);
}
return RETURN_OK;
}
unsigned int unf_get_cm_handle_op(struct unf_cm_handle_op_s *v_cm_handle)
{
UNF_CHECK_VALID(0x1708, UNF_TRUE, v_cm_handle,
return UNF_RETURN_ERROR);
memcpy(v_cm_handle, &cm_low_levle_handle,
sizeof(struct unf_cm_handle_op_s));
return RETURN_OK;
}
static void unf_uninit_cm_low_level_handle(void)
{
memset(&cm_low_levle_handle, 0, sizeof(struct unf_cm_handle_op_s));
}
int unf_event_process(void *v_arg)
{
struct list_head *node = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned long flags = 0;
UNF_REFERNCE_VAR(v_arg);
set_user_nice(current, 4);
recalc_sigpending();
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[event]Enter event thread");
complete(fc_event_handle_thd_comp);
do {
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags);
if (list_empty(&fc_event_list.list_head) == UNF_TRUE) {
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
flags);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((long)msecs_to_jiffies(1000));
} else {
node = (&fc_event_list.list_head)->next;
list_del_init(node);
fc_event_list.list_num--;
event_node = list_entry(node,
struct unf_cm_event_report,
list_entry);
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
flags);
/* Process event node */
unf_handle_event(event_node);
}
} while (!event_thread_exit);
/* complete_and_exit() does not return, so trace before exiting */
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR,
"[event]Event thread exit");
complete_and_exit(fc_event_handle_thd_comp, 0);
return RETURN_OK;
}
static unsigned int unf_creat_event_center(void)
{
struct completion fc_event_completion =
COMPLETION_INITIALIZER(fc_event_completion);
struct completion *p_fc_event_completion = &fc_event_completion;
INIT_LIST_HEAD(&fc_event_list.list_head);
fc_event_list.list_num = 0;
spin_lock_init(&fc_event_list.fc_eventlist_lock);
fc_event_handle_thd_comp = p_fc_event_completion;
event_thread = kthread_run(unf_event_process, NULL, "hifc_event");
if (IS_ERR(event_thread)) {
/* no thread exists yet; complete_and_exit() here would
 * terminate the insmod process, so just clear the pointer
 */
fc_event_handle_thd_comp = NULL;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Create event thread failed(0x%p)",
event_thread);
return UNF_RETURN_ERROR;
}
wait_for_completion(fc_event_handle_thd_comp);
return RETURN_OK;
}
static void unf_cm_event_thread_exit(void)
{
struct completion fc_event_completion =
COMPLETION_INITIALIZER(fc_event_completion);
struct completion *p_fc_event_completion = &fc_event_completion;
fc_event_handle_thd_comp = p_fc_event_completion;
event_thread_exit = 1;
wake_up_process(event_thread);
wait_for_completion(fc_event_handle_thd_comp);
fc_event_handle_thd_comp = NULL;
}
static void unf_cm_cread_card_mgr_list(void)
{
/* So far, do not care */
INIT_LIST_HEAD(&card_thread_mgr.list_card_list_head);
spin_lock_init(&card_thread_mgr.global_card_list_lock);
card_thread_mgr.card_sum = 0;
}
static int unf_port_feature_pool_init(void)
{
unsigned int i = 0;
unsigned int rport_fea_pool_size = 0;
struct unf_rport_feature_recard_s *rport_fea_recard = NULL;
unsigned long flags = 0;
rport_fea_pool_size = sizeof(struct unf_rport_feature_pool_s);
port_fea_pool = vmalloc(rport_fea_pool_size);
if (!port_fea_pool) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]cannot allocate rport feature pool");
return UNF_RETURN_ERROR;
}
memset(port_fea_pool, 0, rport_fea_pool_size);
spin_lock_init(&port_fea_pool->port_fea_pool_lock);
INIT_LIST_HEAD(&port_fea_pool->list_busy_head);
INIT_LIST_HEAD(&port_fea_pool->list_free_head);
port_fea_pool->p_port_feature_pool_addr =
vmalloc((size_t)(RPORT_FEATURE_POOL_SIZE *
sizeof(struct unf_rport_feature_recard_s)));
if (!port_fea_pool->p_port_feature_pool_addr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]cannot allocate rport feature pool address");
vfree(port_fea_pool);
port_fea_pool = NULL;
return UNF_RETURN_ERROR;
}
memset(port_fea_pool->p_port_feature_pool_addr, 0,
sizeof(struct unf_rport_feature_recard_s) *
RPORT_FEATURE_POOL_SIZE);
rport_fea_recard =
(struct unf_rport_feature_recard_s *)
port_fea_pool->p_port_feature_pool_addr;
spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags);
for (i = 0; i < RPORT_FEATURE_POOL_SIZE; i++) {
list_add_tail(&rport_fea_recard->entry_feature,
&port_fea_pool->list_free_head);
rport_fea_recard++;
}
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags);
return RETURN_OK;
}
void unf_free_port_feature_pool(void)
{
if (port_fea_pool->p_port_feature_pool_addr) {
vfree(port_fea_pool->p_port_feature_pool_addr);
port_fea_pool->p_port_feature_pool_addr = NULL;
}
vfree(port_fea_pool);
port_fea_pool = NULL;
}
int unf_common_init(void)
{
int ret = RETURN_OK;
unf_dbg_level = UNF_MAJOR;
log_print_level = UNF_KEVENT;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"UNF Driver Version:%s.", UNF_FC_VERSION);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"UNF Compile Time: %s", __TIME_STR__);
ret = unf_port_feature_pool_init();
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port Feature Pool init failed");
return ret;
}
/* 1. Init Transport */
ret = (int)unf_register_ini_transport();
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]INI interface init failed");
unf_free_port_feature_pool();
return ret;
}
/* 2. Init L_Port MG: Y */
unf_port_mgmt_init();
/* 3. Init card MG list: N */
unf_cm_cread_card_mgr_list();
/* 4. Init global event resource: N */
ret = (int)unf_init_global_event_msg();
if (ret != RETURN_OK) {
unf_unregister_ini_transport();
unf_free_port_feature_pool();
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Create global event center failed");
return ret;
}
/* 5. Create event center(one thread per pf): Y */
ret = (int)unf_creat_event_center();
if (ret != RETURN_OK) {
unf_destroy_global_event_msg();
unf_unregister_ini_transport();
unf_free_port_feature_pool();
fc_event_handle_thd_comp = NULL;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Create event center (thread) failed");
return ret;
}
/* 6. Create work queue: Y */
unf_work_queue = create_workqueue("unf_wq");
if (!unf_work_queue) {
/* event thread exist */
unf_cm_event_thread_exit();
unf_destroy_global_event_msg();
fc_event_handle_thd_comp = NULL;
unf_unregister_ini_transport();
unf_free_port_feature_pool();
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Create work queue failed");
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Init common layer succeed");
return ret;
}
static void unf_destroy_dirty_port(void)
{
unsigned int v_ditry_port_num = 0;
unf_show_dirty_port(UNF_FALSE, &v_ditry_port_num);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Sys has %d dirty L_Port(s)", v_ditry_port_num);
}
void unf_common_exit(void)
{
unf_free_port_feature_pool();
unf_destroy_dirty_port();
flush_workqueue(unf_work_queue);
destroy_workqueue(unf_work_queue);
unf_work_queue = NULL;
unf_cm_event_thread_exit();
unf_destroy_global_event_msg();
unf_uninit_cm_low_level_handle();
unf_port_mgmt_deinit();
unf_unregister_ini_transport();
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[info]HIFC module remove succeed");
}

1338
hifc/unf_io.c Normal file

File diff suppressed because it is too large Load Diff

90
hifc/unf_io.h Normal file
View File

@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_IO_H__
#define __UNF_IO_H__
#define UNF_MAX_TARGET_NUMBER 2048
#define UNF_DEFAULT_MAX_LUN 0xFFFF
#define UNF_MAX_DMA_SEGS 0x400
#define UNF_MAX_SCSI_CMND_LEN 16
#define UNF_MAX_SECTORS 0xffff
#define UNF_MAX_BUS_CHANNEL 0
#define UNF_DMA_BOUNDARY 0xffffffffffffffff
#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */
#define NO_SENSE 0x00
#define RECOVERED_ERROR 0x01
#define NOT_READY 0x02
#define MEDIUM_ERROR 0x03
#define HARDWARE_ERROR 0x04
#define ILLEGAL_REQUEST 0x05
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
#define MISCOMPARE 0x0e
#define UNF_GET_SCSI_HOST_ID_BY_CMND(pcmd) ((pcmd)->scsi_host_id)
#define UNF_GET_SCSI_ID_BY_CMND(pcmd) ((pcmd)->scsi_id)
#define UNF_GET_HOST_PORT_BY_CMND(pcmd) ((pcmd)->drv_private)
#define UNF_GET_FCP_CMND(pcmd) ((pcmd)->pcmnd[0])
#define UNF_GET_DATA_LEN(pcmd) ((pcmd)->transfer_len)
#define UNF_GET_DATA_DIRECTION(pcmd) ((pcmd)->data_direction)
#define UNF_GET_HOST_CMND(pcmd) ((pcmd)->upper_cmnd)
#define UNF_GET_CMND_DONE_FUNC(pcmd) ((pcmd)->pfn_done)
#define UNF_GET_SGL_ENTRY_BUF_FUNC(pcmd) ((pcmd)->pfn_unf_ini_get_sgl_entry)
#define UNF_GET_SENSE_BUF_ADDR(pcmd) ((pcmd)->sense_buf)
#define UNF_GET_ERR_CODE_TABLE(pcmd) ((pcmd)->err_code_table)
#define UNF_GET_ERR_CODE_TABLE_COUNT(pcmd) ((pcmd)->err_code_table_cout)
#define UNF_SET_HOST_CMND(pcmd, host_cmd) ((pcmd)->upper_cmnd = (host_cmd))
#define UNF_SET_CMND_DONE_FUNC(pcmd, pfn) ((pcmd)->pfn_done = (pfn))
#define UNF_SET_RESID(pcmd, id_len) ((pcmd)->resid = (id_len))
#define UNF_SET_CMND_RESULT(pcmd, uiresult) ((pcmd)->result = ((int)uiresult))
#define UNF_DONE_SCSI_CMND(pcmd) ((pcmd)->pfn_done(pcmd))
#define UNF_GET_CMND_SGL(pcmd) ((pcmd)->sgl)
#define UNF_INI_GET_DIF_SGL(pcmd) ((pcmd)->dif_control.dif_sgl)
unsigned int unf_ini_scsi_completed(void *v_lport,
struct unf_frame_pkg_s *v_pkg);
unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len);
unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len);
void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result);
void unf_done_ini_xchg(struct unf_xchg_s *v_xchg);
unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg);
void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg);
int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
struct unf_rport_s *unf_find_rport_by_scsi_id(
struct unf_lport_s *v_lport,
struct unf_ini_error_code_s *v_err_code_table,
unsigned int v_err_code_table_cout,
unsigned int v_scsi_id,
unsigned int *v_scsi_result);
struct unf_lport_s *unf_find_lport_by_scsi_cmd(
struct unf_scsi_cmd_s *v_scsi_cmnd);
void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
unsigned int unf_get_uplevel_cmnd_errcode(
struct unf_ini_error_code_s *v_err_table,
unsigned int v_err_table_count,
unsigned int v_drv_err_code);
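/*
 * Usage sketch (illustrative only): unf_get_uplevel_cmnd_errcode()
 * translates a driver error code into an upper-level SCSI result via
 * the per-command error table, e.g. in a completion path:
 *
 *	result = unf_get_uplevel_cmnd_errcode(
 *			scsi_cmnd_info->err_code_table,
 *			scsi_cmnd_info->err_code_table_cout,
 *			UNF_IO_ABORT_PORT_REMOVING);
 *
 * The table pointer and count come from struct unf_scsi_cmd_info_s
 * (see unf_exchg.h); the specific error code here is just an example
 * taken from unf_io_abnormal.c.
 */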
#endif

926
hifc/unf_io_abnormal.c Normal file
View File

@ -0,0 +1,926 @@
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#include "unf_io.h"
#include "unf_portman.h"
#include "unf_service.h"
#include "unf_io_abnormal.h"
static int unf_send_abts_success(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd,
unsigned int time_out_value)
{
int wait_marker = UNF_TRUE;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int scsi_id;
unsigned int ret;
unsigned long flag = 0;
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
wait_marker = (v_xchg->abts_state & MARKER_STS_RECEIVED) ?
UNF_FALSE : UNF_TRUE;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
if (wait_marker) {
if (down_timeout(
&v_xchg->task_sema,
(long long)msecs_to_jiffies(time_out_value))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)",
v_lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->hot_pool_tag,
v_xchg->rx_id);
/* Cancel abts rsp timer when sema timeout */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)v_xchg);
/* Clear the INI_IO_STATE_UPABORT flag and let TMF
 * process the I/O
 */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return UNF_SCSI_ABORT_FAIL;
}
} else {
v_xchg->ucode_abts_state = UNF_IO_SUCCESS;
}
scsi_image_table = &v_lport->rport_scsi_table;
scsi_id = v_scsi_cmnd->scsi_id;
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) ||
(v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
v_lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id,
v_xchg->ucode_abts_state);
ret = DID_RESET;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret);
unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16);
return UNF_SCSI_ABORT_SUCCESS;
}
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* Cancel abts rsp timer before falling back to TMF */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)",
v_lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->hot_pool_tag,
v_xchg->scsi_cmnd_info.result, v_xchg->io_state);
/* return fail and then enter TMF */
return UNF_SCSI_ABORT_FAIL;
}
static int unf_ini_abort_cmnd(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/*
* About INI_IO_STATE_UPABORT:
*
* 1. Check: AC power down
* 2. Check: L_Port destroy
* 3. Check: I/O XCHG timeout
* 4. Set ABORT: send ABTS
* 5. Set ABORT: LUN reset
* 6. Set ABORT: Target reset
* 7. Check: prevent sending I/O to the target (UNF_PreferToSendScsiCmnd)
* 8. Check: Done INI XCHG --->>> do not call scsi_done, return directly
* 9. Check: INI SCSI Complete --->>>
* do not call scsi_done, return directly
*
* (An illustrative UPABORT-check helper sketch follows this function.)
*/
#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000) /* 2s */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flag = 0;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int scsi_id;
unsigned int ret;
unsigned int time_out_value = (unsigned int)UNF_WAIT_SEM_TIMEOUT;
UNF_CHECK_VALID(0x1335, TRUE, v_lport, return UNF_SCSI_ABORT_FAIL);
lport = v_lport;
/* 1. Xchg State Set: INI_IO_STATE_UPABORT */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state |= INI_IO_STATE_UPABORT;
rport = v_xchg->rport;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* 2. R_Port check */
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg->ox_id, v_xchg->rx_id);
return UNF_SCSI_ABORT_SUCCESS;
}
spin_lock_irqsave(&rport->rport_state_lock, flag);
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) find RPort's state(0x%x) is not ready but send ABTS also, exchange(0x%p) tag(0x%x)",
lport->port_id, rport->rp_state,
v_xchg, v_xchg->hot_pool_tag);
/*
* Important: still send ABTS & update the timer
* Purpose: only used for release chip (uCode) resource,
* continue
*/
time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT;
}
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
/* 3. L_Port State check */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) is removing", lport->port_id);
/* Clear UPABORT under the state lock, as at other call sites */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return UNF_SCSI_ABORT_FAIL;
}
scsi_image_table = &lport->rport_scsi_table;
scsi_id = v_scsi_cmnd->scsi_id;
/* If pcie linkdown, complete this io and flush all io */
if (unlikely(lport->b_pcie_linkdown == UNF_TRUE)) {
ret = DID_RESET;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret);
unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16);
unf_free_lport_all_xchg(v_lport);
return UNF_SCSI_ABORT_SUCCESS;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) OxID(0x%x 0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx)",
lport->port_id, v_xchg,
(unsigned long long)jiffies_to_msecs(jiffies) -
(unsigned long long)jiffies_to_msecs(v_xchg->alloc_jif),
v_xchg->sid, v_xchg->did, rport->port_name,
v_xchg->ox_id, v_xchg->hot_pool_tag, v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id, v_scsi_cmnd->cmnd_sn);
/* Init abts marker semaphore */
sema_init(&v_xchg->task_sema, 0);
if (v_xchg->scsi_cmnd_info.time_out != 0)
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(v_xchg);
/* Add timer for sending ABTS */
v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)v_xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
/* 4. Send INI ABTS CMND */
if (unf_send_abts(lport, v_xchg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Send ABTS failed. Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->hot_pool_tag,
v_xchg->rx_id);
/* Cancel timer when sending ABTS failed */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)v_xchg);
/* Clear the INI_IO_STATE_UPABORT flag
* and process the io in TMF
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return UNF_SCSI_ABORT_FAIL;
}
return unf_send_abts_success(lport, v_xchg, v_scsi_cmnd,
time_out_value);
}
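/*
* Illustrative helper (hypothetical, not part of the original driver):
* a send/completion path can honour the INI_IO_STATE_UPABORT rule
* described at the top of unf_ini_abort_cmnd() by checking the flag
* under the exchange state lock and skipping scsi_done()/sending
* while it is set.
*/
static inline int unf_xchg_is_up_aborting(struct unf_xchg_s *v_xchg)
{
unsigned long flag = 0;
int aborting = UNF_FALSE;
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if (v_xchg->io_state & INI_IO_STATE_UPABORT)
aborting = UNF_TRUE;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return aborting;
}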
static void unf_flush_ini_resp_que(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1335, TRUE, v_lport, return);
if (v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que)
(void)v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que(v_lport->fc_port);
}
int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/*
* SCSI ABORT Command --->>> FC ABTS Command
* If return ABORT_FAIL then enter TMF process
*/
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_rport_s *rport = NULL;
struct unf_lport_s *xchg_lport = NULL;
int ret;
unsigned long flag = 0;
/* 1. Get L_Port: Point to Scsi_host */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi host id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return UNF_SCSI_ABORT_FAIL;
}
/* 2. find target Xchg for INI Abort CMND */
xchg = unf_cm_lookup_xchg_by_cmnd_sn(lport, v_scsi_cmnd->cmnd_sn,
v_scsi_cmnd->world_id);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_ABNORMAL,
UNF_WARN,
"[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx)",
lport->port_id,
(unsigned long)v_scsi_cmnd->cmnd_sn);
unf_flush_ini_resp_que(lport);
return UNF_SCSI_ABORT_SUCCESS;
}
/* 3. increase ref_cnt to protect exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_ABORT);
if (unlikely(ret != RETURN_OK)) {
unf_flush_ini_resp_que(lport);
return UNF_SCSI_ABORT_SUCCESS;
}
v_scsi_cmnd->upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd;
xchg->debug_hook = UNF_TRUE;
/* 4. Exchange L_Port/R_Port get & check */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
xchg_lport = xchg->lport;
rport = xchg->rport;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (unlikely(!xchg_lport || !rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)",
xchg, xchg->io_state);
unf_xchg_ref_dec(xchg, INI_EH_ABORT);
if (!xchg_lport)
return UNF_SCSI_ABORT_FAIL; /* for L_Port */
return UNF_SCSI_ABORT_SUCCESS; /* for R_Port */
}
/* 5. Send INI Abort Cmnd */
ret = unf_ini_abort_cmnd(xchg_lport, xchg, v_scsi_cmnd);
/* 6. decrease exchange ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_ABORT);
return ret;
}
static unsigned int unf_tmf_timeout_recovery_default(void *v_rport,
void *v_xchg)
{
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
lport = xchg->lport;
UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
unf_rport_enter_logo(lport, rport);
return RETURN_OK;
}
void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg)
{
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
lport = xchg->lport;
UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return);
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (INI_IO_STATE_DONE & xchg->io_state) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
return;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (xchg->rport_bind_jifs != rport->rport_alloc_jifs)
return;
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
unf_rport_enter_logo(lport, rport);
}
unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg)
{
/* Do port reset or R_Port LOGO */
int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport,
return UNF_RETURN_ERROR);
lport = xchg->lport->root_lport;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport,
return UNF_RETURN_ERROR);
/* 1. TMF response timeout & Marker STS timeout */
if (!(xchg->tmf_state &
(MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) {
/* TMF timeout & marker timeout */
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) receive marker status timeout and do recovery",
lport->port_id);
/* Do port reset */
ret = unf_cm_reset_port(lport->port_id);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) do reset failed",
lport->port_id);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
/* 2. default case: Do LOGO process */
unf_tmf_timeout_recovery_default(rport, xchg);
return RETURN_OK;
}
void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg)
{
/*
* for device(lun)/target(session) reset:
* Do port reset or R_Port LOGO
*/
if (v_lport->pfn_unf_tmf_abnormal_recovery)
v_lport->pfn_unf_tmf_abnormal_recovery((void *)v_rport,
(void *)v_xchg);
}
static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd,
struct unf_scsi_cmd_s *v_scsi_cmnd,
enum unf_task_mgmt_cmnd_e v_task_mgmt)
{
UNF_CHECK_VALID(0x1339, UNF_TRUE, v_fcp_cmnd, return);
UNF_CHECK_VALID(0x1340, UNF_TRUE, v_scsi_cmnd, return);
unf_big_end_to_cpu((void *)v_scsi_cmnd->pc_lun_id, UNF_FCP_LUNID_LEN_8);
(*(unsigned long long *)(v_scsi_cmnd->pc_lun_id)) >>= 8;
memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id,
sizeof(v_fcp_cmnd->lun));
/*
* If the TASK MANAGEMENT FLAGS field is set to a nonzero value,
* the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field,
* the RDDATA bit, and the WRDATA bit shall be ignored and the
* FCP_BIDIRECTIONAL_READ_DL field shall not be
* included in the FCP_CMND IU payload
*/
v_fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS(v_task_mgmt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.",
v_task_mgmt, v_fcp_cmnd->control);
}
int unf_send_scsi_mgmt_cmnd(struct unf_xchg_s *v_xchg,
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_scsi_cmd_s *v_scsi_cmnd,
enum unf_task_mgmt_cmnd_e v_task_mgnt_cmd_type)
{
/*
* 1. Device/LUN reset
* 2. Target/Session reset
*/
struct unf_xchg_s *xchg = NULL;
int ret = SUCCESS;
struct unf_frame_pkg_s pkg = { 0 };
unsigned long flag = 0;
UNF_CHECK_VALID(0x1341, UNF_TRUE, v_xchg, return FAILED);
UNF_CHECK_VALID(0x1342, UNF_TRUE, v_lport, return FAILED);
UNF_CHECK_VALID(0x1343, UNF_TRUE, v_rport, return FAILED);
UNF_CHECK_VALID(0x1344, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1345, UNF_TRUE,
((v_task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK) &&
(v_task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET)),
return FAILED);
xchg = v_xchg;
xchg->lport = v_lport;
xchg->rport = v_rport;
/* 1. State: Up_Task */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
xchg->io_state |= INI_IO_STATE_UPTASK;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (v_lport->low_level_func.xchg_mgr_type ==
UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) {
xchg->ox_id = xchg->hot_pool_tag;
pkg.frame_head.oxid_rxid =
((unsigned int)xchg->ox_id << 16) | xchg->rx_id;
}
/* 2. Set TASK MANAGEMENT FLAGS of FCP_CMND to
* the corresponding task management command
*/
unf_build_task_mgmt_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd,
v_task_mgnt_cmd_type);
pkg.xchg_contex = xchg;
pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = v_rport->rport_index;
pkg.fcp_cmnd = &xchg->fcp_cmnd;
pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag;
pkg.frame_head.csctl_sid = v_lport->nport_id;
pkg.frame_head.rctl_did = v_rport->nport_id;
pkg.unf_rsp_pload_bl.buffer_ptr =
(unsigned char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
pkg.unf_rsp_pload_bl.buf_dma_addr =
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr;
pkg.unf_rsp_pload_bl.length = PAGE_SIZE;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME];
if (unlikely(v_lport->b_pcie_linkdown == UNF_TRUE)) {
unf_free_lport_all_xchg(v_lport);
return SUCCESS;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)",
v_lport->port_id, v_task_mgnt_cmd_type,
v_rport->nport_id, xchg->hot_pool_tag,
*((unsigned long long *)v_scsi_cmnd->pc_lun_id));
/* 3. Init exchange task semaphore */
sema_init(&xchg->task_sema, 0);
/* 4. Send Mgmt Task to low-level */
if (unf_hardware_start_io(v_lport, &pkg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed",
v_lport->port_id, v_task_mgnt_cmd_type,
v_rport->nport_id);
return FAILED;
}
/*
* 5. Wait for the response with a timeout.
*
* The semaphore is initialised to 0 before the Mgmt Task message is
* sent and is up()'ed when the RSP message arrives (see
* unf_process_scsi_mgmt_result(), and the pairing sketch after this
* function). down_timeout() returns 0 when woken up by the RSP path
* and non-zero when no RSP is received within the timeout; note the
* timeout parameter must be in jiffies, hence msecs_to_jiffies().
*/
if (down_timeout(&xchg->task_sema,
(long long)msecs_to_jiffies((unsigned int)UNF_WAIT_SEM_TIMEOUT))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id,
v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id);
/* semaphore timeout */
ret = FAILED;
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
if (v_lport->en_states == UNF_LPORT_ST_RESET)
ret = SUCCESS;
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
return ret;
}
/*
* 6. NOTE: no timeout (the semaphore was woken up by the RSP path).
* Check the Scsi_Cmnd (Mgmt Task) result:
*
* FAILED: completed with an error code or an error RSP
* SUCCESS: otherwise
*/
if (xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id);
ret = SUCCESS;
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id,
v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id);
ret = FAILED;
}
return ret;
}
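/*
* Minimal sketch of the TMF semaphore handshake used above (the helper
* below is hypothetical and only illustrates the pairing): the sender
* initialises task_sema to 0 and blocks in down_timeout(); the response
* path records the result and wakes it, exactly as
* unf_process_scsi_mgmt_result() does.
*/
static inline void unf_tmf_rsp_wakeup_sketch(struct unf_xchg_s *v_xchg,
unsigned int v_io_result)
{
v_xchg->scsi_cmnd_info.result = v_io_result;
up(&v_xchg->task_sema); /* pairs with down_timeout() in the sender */
}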
int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int cmnd_result = 0;
int ret = SUCCESS;
UNF_CHECK_VALID(0x1349, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1350, UNF_TRUE, v_scsi_cmnd->pc_lun_id,
return FAILED);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Enter device/LUN reset handler");
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
/* 2. L_Port State checking */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) is removing", lport);
return FAILED;
}
/*
* 3. Get R_Port: no rport is found or rport is not ready, return ok
* from: L_Port -->> rport_scsi_table (image table)
* -->> rport_info_table
*/
rport = unf_find_rport_by_scsi_id(lport,
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd),
&cmnd_result);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)",
lport->port_id,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
return SUCCESS;
}
/*
* 4. Set the I/O of the corresponding LUN to abort.
*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
unf_cm_xchg_abort_by_lun(
lport, rport,
*((unsigned long long *)v_scsi_cmnd->pc_lun_id),
NULL, UNF_FALSE);
/* 5. R_Port state check */
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready",
lport->port_id, rport->nport_id,
rport->rp_state, v_scsi_cmnd);
return SUCCESS;
}
/* 6. Get & inc ref_cnt free Xchg for Device reset */
xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport,
UNF_XCHG_TYPE_INI);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) can't get free exchange", lport);
return FAILED;
}
/* increase ref_cnt for protecting exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET);
UNF_CHECK_VALID(0x1351, UNF_TRUE, (ret == RETURN_OK), return FAILED);
/* 7. Send Device/LUN Reset to Low level */
ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport,
v_scsi_cmnd,
UNF_FCP_TM_LOGICAL_UNIT_RESET);
if (unlikely(ret == FAILED)) {
/*
* Do port reset or R_Port LOGO:
* 1. FAILED: send failed
* 2. FAILED: semaphore timeout
* 3. SUCCESS: rcvd rsp & semaphore has been woken up
*/
unf_tmf_abnormal_recovery(lport, rport, xchg);
}
/*
* 8. Release resource immediately if necessary
* NOTE: here, semaphore timeout or rcvd rsp
* (semaphore has been woken up)
*/
if (likely((lport->b_port_removing != UNF_TRUE) ||
(lport->root_lport != lport)))
unf_cm_free_xchg(xchg->lport, xchg);
/* decrease ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET);
return SUCCESS;
}
int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int cmnd_result = 0;
int ret;
UNF_CHECK_VALID(0x1355, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1356, UNF_TRUE, v_scsi_cmnd->pc_lun_id,
return FAILED);
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
/* 2. L_Port State check */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) is removing", lport);
return FAILED;
}
/*
* 3. Get R_Port: no rport is found or rport is not ready, return ok
* from: L_Port -->> rport_scsi_table (image table) -->>
* rport_info_table
*/
rport = unf_find_rport_by_scsi_id(lport,
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd),
&cmnd_result);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find rport by scsi_id(0x%x)",
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
return SUCCESS;
}
/*
* 4. set UP_ABORT on Target IO and Session IO
*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
unf_cm_xchg_abort_by_session(lport, rport);
/* 5. R_Port state check */
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)",
lport->port_id, rport->nport_id,
rport->rp_state, v_scsi_cmnd);
return SUCCESS;
}
/* 6. Get free Xchg for Target Reset CMND */
xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport,
UNF_XCHG_TYPE_INI);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) can't get free exchange", lport);
return FAILED;
}
/* increase ref_cnt to protect exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET);
UNF_CHECK_VALID(0x1357, UNF_TRUE, (ret == RETURN_OK), return FAILED);
/* 7. Send Target Reset Cmnd to low-level */
ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, v_scsi_cmnd,
UNF_FCP_TM_TARGET_RESET);
if (unlikely(ret == FAILED)) {
/*
* Do port reset or R_Port LOGO:
* 1. FAILED: send failed
* 2. FAILED: semaphore timeout
* 3. SUCCESS: rcvd rsp & semaphore has been woken up
*/
unf_tmf_abnormal_recovery(lport, rport, xchg);
}
/*
* 8. Release resource immediately if necessary
* NOTE: here, semaphore timeout or rcvd rsp
* (semaphore has been woken up)
*/
if (likely((lport->b_port_removing != UNF_TRUE) ||
(lport->root_lport != lport)))
unf_cm_free_xchg(xchg->lport, xchg);
/* decrease exchange ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET);
return SUCCESS;
}
int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI BUS Reset Command --->>> FC Port Reset Command */
struct unf_lport_s *lport = NULL;
int cmnd_result = 0;
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[event]Do port reset with scsi_bus_reset");
cmnd_result = unf_cm_reset_port(lport->port_id);
if (unlikely(cmnd_result == UNF_RETURN_ERROR))
return FAILED;
else
return SUCCESS;
}
void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg,
struct unf_xchg_s *v_xchg)
{
unsigned char *rsp_info = NULL;
unsigned char rsp_code = 0;
unsigned int code_index = 0;
/*
* As found in LLT: RSP_CODE is byte 3 of FCP_RSP_INFO on the wire
* (see FCP-4 Table 26, FCP_RSP_INFO field format); after the
* big-to-little endian conversion below it sits at byte 0, hence
* code_index = 0.
*
* This handler:
* 1. sets the command result state
* 2. wakes up the task semaphore
*/
UNF_CHECK_VALID(0x1321, TRUE, v_pkg, return);
UNF_CHECK_VALID(0x1322, TRUE, v_xchg, return);
v_xchg->tmf_state |= TMF_RESPONSE_RECEIVED;
if (UNF_GET_LL_ERR(v_pkg) != UNF_IO_SUCCESS) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Send scsi manage command failed with error code(0x%x)",
UNF_GET_LL_ERR(v_pkg));
v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
return;
}
rsp_info = v_pkg->unf_rsp_pload_bl.buffer_ptr;
if (!rsp_info && (v_pkg->unf_rsp_pload_bl.length != 0)) {
rsp_info =
(unsigned char *)
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
/* change to little end if necessary */
if (rsp_info && (v_pkg->byte_orders & UNF_BIT_3))
unf_big_end_to_cpu(
rsp_info,
v_pkg->unf_rsp_pload_bl.length);
}
if (!rsp_info) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]FCP response data pointer is NULL with Xchg TAG(0x%x)",
v_xchg->hot_pool_tag);
v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
return;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)",
v_pkg->unf_rsp_pload_bl.length,
rsp_info[0],
rsp_info[1],
rsp_info[2],
rsp_info[3],
rsp_info[4],
rsp_info[5],
rsp_info[6],
rsp_info[7]);
rsp_code = rsp_info[code_index];
if ((rsp_code == UNF_FCP_TM_RSP_COMPLETE) ||
(rsp_code == UNF_FCP_TM_RSP_SUCCEED))
v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS;
else
v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
}

16
hifc/unf_io_abnormal.h Normal file
View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_IO__ABNORMAL_H__
#define __UNF_IO__ABNORMAL_H__
#define UNF_GET_LL_ERR(v_pkg) (((v_pkg)->status) >> 16)
void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg,
struct unf_xchg_s *v_xchg);
unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport,
struct unf_frame_pkg_s *v_pkg);
#endif

183
hifc/unf_log.h Normal file
View File

@ -0,0 +1,183 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_LOG_H__
#define __UNF_LOG_H__
#define UNF_CRITICAL 1
#define UNF_ERR 2
#define UNF_WARN 3
#define UNF_KEVENT 4
#define UNF_MAJOR 5
#define UNF_MINOR 6
#define UNF_INFO 7
#define UNF_DATA 7
#define UNF_ALL 7
enum unf_debug_type_e {
UNF_DEBUG_TYPE_MML = 0,
UNF_DEBUG_TYPE_DIAGNOSE = 1,
UNF_DEBUG_TYPE_MESSAGE = 2,
UNF_DEBUG_TYPE_BUTT
};
enum unf_log_attr_e {
UNF_LOG_LOGIN_ATT = 0x1,
UNF_LOG_IO_ATT = 0x2,
UNF_LOG_EQUIP_ATT = 0x4,
UNF_LOG_REG_ATT = 0x8,
UNF_LOG_REG_MML_TEST = 0x10,
UNF_LOG_EVENT = 0x20,
UNF_LOG_NORMAL = 0x40,
UNF_LOG_ABNORMAL = 0x80,
UNF_LOG_BUTT
};
enum event_log_e {
UNF_EVTLOG_DRIVER_SUC = 0,
UNF_EVTLOG_DRIVER_INFO,
UNF_EVTLOG_DRIVER_WARN,
UNF_EVTLOG_DRIVER_ERR,
UNF_EVTLOG_LINK_SUC,
UNF_EVTLOG_LINK_INFO,
UNF_EVTLOG_LINK_WARN,
UNF_EVTLOG_LINK_ERR,
UNF_EVTLOG_IO_SUC,
UNF_EVTLOG_IO_INFO,
UNF_EVTLOG_IO_WARN,
UNF_EVTLOG_IO_ERR,
UNF_EVTLOG_TOOL_SUC,
UNF_EVTLOG_TOOL_INFO,
UNF_EVTLOG_TOOL_WARN,
UNF_EVTLOG_TOOL_ERR,
UNF_EVTLOG_BUT
};
#define UNF_IO_ATT_PRINT_TIMES 2
#define UNF_LOGIN_ATT_PRINT_TIMES 100
#define UNF_IO_ATT_PRINT_LIMIT msecs_to_jiffies(6 * 1000)
extern unsigned int unf_dbg_level;
extern unsigned int log_print_level;
extern unsigned int log_limted_times;
#define DRV_LOG_LIMIT(module_id, log_level, log_id, log_att, format, ...) \
do { \
static unsigned long pre; \
static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \
if (time_after_eq(jiffies, pre + (UNF_IO_ATT_PRINT_LIMIT))) { \
if (log_att == UNF_LOG_ABNORMAL) { \
should_print = UNF_IO_ATT_PRINT_TIMES; \
} else { \
should_print = log_limted_times; \
} \
} \
if (should_print < 0) { \
if (log_att != UNF_LOG_ABNORMAL) { \
pre = jiffies; \
} \
break; \
} \
if (should_print-- > 0) { \
printk(log_level \
"[%d][FC_UNF]" format "[%s][%-5d]\n", \
smp_processor_id(), ##__VA_ARGS__, \
__func__, __LINE__); \
} \
if (should_print == 0) { \
printk(log_level \
"[FC_UNF]log is limited[%s][%-5d]\n", \
__func__, __LINE__); \
} \
pre = jiffies; \
} while (0)
#define UNF_CHECK_VALID(logid, need_check, condition, fail_do) \
do { \
if (unlikely(!(condition))) { \
UNF_TRACE((logid), UNF_LOG_REG_ATT, UNF_ERR, \
"Para check(%s) invalid", #condition); \
fail_do; \
} \
} while (0)
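/*
* Usage sketch (the arguments shown are hypothetical): when the
* condition fails, the macro logs through UNF_TRACE and executes the
* supplied fail action, e.g.:
*
* UNF_CHECK_VALID(0x1234, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
*
* Note that the need_check argument is not referenced by the body.
*/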
#define HIUNF_TRACE(log_id, log_att, log_level, format, ...) \
do { \
if (unlikely((log_level) <= log_print_level)) { \
if (log_level == UNF_CRITICAL) { \
DRV_LOG_LIMIT(UNF_PID, KERN_CRIT, log_id, \
log_att, format, ##__VA_ARGS__); \
} else if (log_level == UNF_WARN) { \
DRV_LOG_LIMIT(UNF_PID, KERN_WARNING, log_id, \
log_att, format, ##__VA_ARGS__); \
} else if (log_level == UNF_ERR) { \
DRV_LOG_LIMIT(UNF_PID, KERN_ERR, log_id, \
log_att, format, ##__VA_ARGS__); \
} else if (log_level == UNF_MAJOR || \
log_level == UNF_MINOR || \
log_level == UNF_KEVENT) { \
DRV_LOG_LIMIT(UNF_PID, KERN_NOTICE, log_id, \
log_att, format, ##__VA_ARGS__); \
} else if (log_level == UNF_INFO || \
log_level == UNF_DATA) { \
DRV_LOG_LIMIT(UNF_PID, KERN_INFO, log_id, \
log_att, format, ##__VA_ARGS__); \
} \
} \
} while (0)
#define UNF_TRACE(log_id, log_att, log_level, fmt, ...) \
do { \
HIUNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__); \
} while (0)
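/*
* Usage sketch (hypothetical call site): UNF_TRACE expands through
* HIUNF_TRACE into DRV_LOG_LIMIT, so every call site carries its own
* rate-limit state (the static pre/should_print pair). For example:
*
* UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
* "[warn]Port(0x%x) sample message", port_id);
*
* maps UNF_WARN to KERN_WARNING and is suppressed once should_print is
* exhausted within one UNF_IO_ATT_PRINT_LIMIT window.
*/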
#define UNF_INIT_PRIVATE_ST(private_st) \
do { \
memset(&(private_st), 0, sizeof(private_st)); \
} while (0)
#define UNF_PRINT_SFS(dbg_level, portid, v_data, v_size) \
do { \
if ((dbg_level) <= log_print_level) { \
unsigned int cnt = 0; \
printk(KERN_INFO "[INFO]Port(0x%x) sfs:0x", \
(portid)); \
for (cnt = 0; cnt < (v_size) / 4; cnt++) { \
printk(KERN_INFO "%08x ", \
((unsigned int *)v_data)[cnt]); \
} \
printk(KERN_INFO "[FC_UNF][%s]\n", __FUNCTION__); \
} \
} while (0)
#define UNF_PRINT_SFS_LIMIT(dbg_level, portid, v_data, v_size) \
do { \
if ((dbg_level) <= log_print_level) { \
static unsigned long pre; \
static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \
if (time_after_eq(jiffies, pre + \
UNF_IO_ATT_PRINT_LIMIT)) { \
should_print = log_limted_times; \
} \
if (should_print < 0) { \
pre = jiffies; \
break; \
} \
if (should_print-- > 0) { \
UNF_PRINT_SFS(dbg_level, portid, \
v_data, v_size); \
} \
if (should_print == 0) { \
printk(KERN_INFO "[FC_UNF]sfs log is limited[%s][%-5d]\n", \
__func__, __LINE__); \
} \
pre = jiffies; \
} \
} while (0)
#define UNF_REFERNCE_VAR(var)
#endif

1129
hifc/unf_lport.c Normal file

File diff suppressed because it is too large Load Diff

568
hifc/unf_lport.h Normal file
View File

@ -0,0 +1,568 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_LPORT_H
#define __UNF_LPORT_H
#include "unf_disc.h"
#include "unf_event.h"
#include "unf_common.h"
#define UNF_PORT_TYPE_FC 0
#define UNF_PORT_TYPE_DISC 1
#define UNF_FW_UPDATE_PATH_LEN_MAX 255
#define UNF_EXCHG_MGR_NUM (4)
#define UNF_MAX_IO_RETURN_VALUE 0x12
#define UNF_MAX_SCSI_CMD 0xFF
enum unf_scsi_error_handle_type {
UNF_SCSI_ABORT_IO_TYPE = 0,
UNF_SCSI_DEVICE_RESET_TYPE,
UNF_SCSI_TARGET_RESET_TYPE,
UNF_SCSI_BUS_RESET_TYPE,
UNF_SCSI_HOST_RESET_TYPE,
UNF_SCSI_VIRTUAL_RESET_TYPE,
UNF_SCSI_ERROR_HANDLE_BUTT
};
enum unf_lport_destroy_step_e {
UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0,
UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT,
UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE,
UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER,
UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR,
UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL,
UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR,
UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP,
UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP,
UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP,
UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE,
UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST,
UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST,
UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE,
UNF_LPORT_DESTROY_STEP_BUTT
};
enum unf_lport_enhanced_feature_e {
/* Enhance GFF feature connect even if fail to get GFF feature */
UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001,
/* Enhance IO balance */
UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002,
/* Enhance IO check */
UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004,
/* Close FW ROUTE */
UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008,
/* lowest frequency read SFP information */
UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010,
UNF_LPORT_ENHANCED_FEATURE_BUTT
};
enum unf_lport_login_state_e {
UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */
UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */
UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */
UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */
UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */
UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */
UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */
UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */
UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */
UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */
UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */
UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */
UNF_LPORT_ST_READY, /* ready for use */
UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */
UNF_LPORT_ST_RESET, /* being reset and will restart */
UNF_LPORT_ST_OFFLINE, /* offline */
UNF_LPORT_ST_BUTT
};
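/*
* Illustrative summary derived from the state comments above (not a
* normative list): a fabric login typically walks LINK_UP -> FLOGI_WAIT
* -> PLOGI_WAIT (to the name server) -> RNN_ID/RSNN_NN/RSPN_ID/RPN_ID
* -> RFT_ID/RFF_ID -> SCR_WAIT -> READY, with LOGO/RESET/OFFLINE as
* teardown states.
*/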
enum unf_lport_event_e {
UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */
UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort link up */
UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */
UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */
UNF_EVENT_LPORT_OFFLINE = 0x8004, /* LPort being stopped */
UNF_EVENT_LPORT_RESET = 0x8005,
UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */
UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */
UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport time out */
UNF_EVENT_LPORT_READY = 0x8009,
UNF_EVENT_LPORT_REMOTE_BUTT
};
struct unf_cm_disc_mg_template_s {
/* start input:L_Port,return:ok/fail */
unsigned int (*pfn_unf_disc_start)(void *v_lport);
/* stop input: L_Port,return:ok/fail */
unsigned int (*pfn_unf_disc_stop)(void *v_lport);
/* Callback after disc complete[with event:ok/fail]. */
void (*pfn_unf_disc_callback)(void *v_lport, unsigned int v_result);
};
struct unf_chip_manage_info_s {
struct list_head list_chip_thread_entry;
struct list_head list_head;
spinlock_t chip_event_list_lock;
struct task_struct *data_thread;
unsigned int list_num;
unsigned int slot_id;
unsigned char chip_id;
unsigned char rsv;
unsigned char sfp_9545_fault; /* 9545 fault */
unsigned char sfp_power_fault; /* SFP power fault */
atomic_t ref_cnt;
unsigned int b_thread_exit;
struct unf_chip_info_s chip_info;
atomic_t card_loop_test_flag;
spinlock_t card_loop_back_state_lock;
char update_path[UNF_FW_UPDATE_PATH_LEN_MAX];
};
enum unf_timer_type_e {
UNF_TIMER_TYPE_INI_IO,
UNF_TIMER_TYPE_REQ_IO,
UNF_TIMER_TYPE_INI_RRQ,
UNF_TIMER_TYPE_SFS,
UNF_TIMER_TYPE_INI_ABTS
};
struct unf_cm_xchg_mgr_template_s {
/* Get new Xchg */
/* input:L_Port,ini/tgt type,return:initialized Xchg */
void *(*pfn_unf_xchg_get_free_and_init)(void *, unsigned int,
unsigned short);
/* OXID,SID lookup Xchg */
/* input: L_Port,OXID,SID,return:Xchg */
void *(*pfn_unf_look_up_xchg_by_id)(void *, unsigned short,
unsigned int);
/* input:L_Port,tag,return:Xchg */
void *(*pfn_unf_look_up_xchg_by_tag)(void *, unsigned short);
/* free Xchg */
/* input:L_Port,Xchg,return:void */
void (*pfn_unf_xchg_release)(void *, void *);
/* Abort IO Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_io_xchg_abort)(void *, void *, unsigned int,
unsigned int, unsigned int);
/* Abort SFS Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_sfs_xchg_abort)(void *, void *,
unsigned int, unsigned int);
/* Clean Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_xchg_clean)(void *, unsigned int,
unsigned int);
/* Add Xchg timer */
void (*pfn_unf_xchg_add_timer)(void *, unsigned long,
enum unf_timer_type_e);
/* Cancel Xchg timer */
void (*pfn_unf_xchg_cancel_timer)(void *);
/* L_Port, Abort flag */
void (*pfn_unf_xchg_abort_all_io)(void *, unsigned int, int);
/* find Xchg by scsi Cmnd sn */
void *(*pfn_unf_look_up_xchg_by_cmnd_sn)(void *, unsigned long long,
unsigned int);
/* input:L_Port,unsigned long long */
void (*pfn_unf_xchg_abort_by_lun)(void *, void *, unsigned long long,
void *, int);
void (*pfn_unf_xchg_abort_by_session)(void *, void *);
};
struct unf_rport_pool_s {
unsigned int rport_pool_count;
void *rport_pool_add;
struct list_head list_rports_pool;
spinlock_t rport_free_pool_lock;
/* for synchronous reuse RPort POOL completion */
struct completion *rport_pool_completion;
unsigned long *pul_rpi_bitmap;
};
struct unf_cm_lport_template_s {
/* Get VPort struct and init */
/* input:pstLport,ini/tgt type,return:pstVport */
void *(*pfn_unf_vport_get_free_and_init)(void *, unsigned int);
/* For fast IO path */
/* input: pstLport, VpIndex, return:pstVport */
void *(*pfn_unf_lookup_vport_by_vp_index)(void *, unsigned short);
/* input: pstLport, PortId,return:pstVport */
void *(*pfn_unf_lookup_vport_by_port_id)(void *, unsigned int);
/* input:pstLport, wwpn, return:pstVport */
void *(*pfn_unf_lookup_vport_by_wwpn)(void *, unsigned long long);
/* input:L_Port, DID, return:pstVport */
void *(*pfn_unf_lookup_vport_by_did)(void *, unsigned int);
/* input:L_Port,return:void */
void (*pfn_unf_vport_remove)(void *);
};
struct unf_vport_pool_s {
unsigned short vport_pool_count;
void *vport_pool_addr;
struct list_head list_vport_pool;
spinlock_t vport_pool_lock;
struct completion *vport_pool_completion;
unsigned short slab_next_index; /* Next free vport */
unsigned short slab_total_sum; /* Total Vport num */
struct unf_lport_s *vport_slab[0];
};
struct unf_esgl_pool_s {
unsigned int esgl_pool_count;
void *esgl_pool_addr;
struct list_head list_esgl_pool;
spinlock_t esgl_pool_lock;
struct buf_describe_s esgl_buf_list;
};
/* little endian */
struct unf_port_id_page_s {
struct list_head list_node_rscn;
unsigned char port_id_port;
unsigned char port_id_area;
unsigned char port_id_domain;
unsigned char uc_addr_format : 2;
unsigned char uc_event_qualifier : 4;
unsigned char uc_reserved : 2;
};
struct unf_rscn_mg_s {
spinlock_t rscn_id_list_lock;
unsigned int free_rscn_count;
/* free RSCN page list */
struct list_head list_free_rscn_page;
/* using RSCN page list */
struct list_head list_using_rscn_page;
/* All RSCN PAGE Address */
void *rscn_pool_add;
struct unf_port_id_page_s *(*pfn_unf_get_free_rscn_node)(
void *v_rscn_mg);
void (*pfn_unf_release_rscn_node)(void *v_rscn_mg, void *v_rscn_node);
};
struct unf_disc_rport_mg_s {
void *disc_pool_add;
struct list_head list_disc_rports_pool; /* discovery DISC Rport pool */
struct list_head list_disc_rport_busy; /* Busy discovery DiscRport */
};
struct unf_disc_manage_info_s {
struct list_head list_head;
spinlock_t disc_event_list_lock;
atomic_t disc_contrl_size;
unsigned int b_thread_exit;
struct task_struct *data_thread;
};
struct unf_disc_s {
unsigned int retry_count; /* current retry counter */
unsigned int max_retry_count; /* retry counter */
unsigned int disc_flag; /* Disc flag: Loop Disc, Fabric Disc */
struct completion *disc_completion;
atomic_t disc_ref_cnt;
struct list_head list_busy_rports; /* Busy RPort list */
struct list_head list_delete_rports; /* Delete RPort list */
struct list_head list_destroy_rports;
spinlock_t rport_busy_pool_lock;
struct unf_lport_s *lport;
enum unf_disc_state_e en_states;
struct delayed_work disc_work;
/* Disc operation template */
struct unf_cm_disc_mg_template_s unf_disc_temp;
/* UNF_INIT_DISC/UNF_RSCN_DISC */
unsigned int disc_option;
/* RSCN list */
struct unf_rscn_mg_s rscn_mgr;
struct unf_disc_rport_mg_s disc_rport_mgr;
struct unf_disc_manage_info_s disc_thread_info;
unsigned long long last_disc_jiff;
};
enum unf_service_item_e {
UNF_SERVICE_ITEM_FLOGI = 0,
UNF_SERVICE_ITEM_PLOGI,
UNF_SERVICE_ITEM_PRLI,
UNF_SERVICE_ITEM_RSCN,
UNF_SERVICE_ITEM_ABTS,
UNF_SERVICE_ITEM_PDISC,
UNF_SERVICE_ITEM_ADISC,
UNF_SERVICE_ITEM_LOGO,
UNF_SERVICE_ITEM_SRR,
UNF_SERVICE_ITEM_RRQ,
UNF_SERVICE_ITEM_ECHO,
UNF_SERVICE_ITEM_RLS,
UNF_SERVICE_BUTT
};
/* Link service counter */
struct unf_link_service_collect_s {
unsigned long long service_cnt[UNF_SERVICE_BUTT];
};
struct unf_pcie_error_count_s {
unsigned int pcie_error_count[UNF_PCIE_BUTT];
};
#define INVALID_WWPN 0
enum unf_device_scsi_state_e {
UNF_SCSI_ST_INIT = 0,
UNF_SCSI_ST_OFFLINE,
UNF_SCSI_ST_ONLINE,
UNF_SCSI_ST_DEAD,
UNF_SCSI_ST_BUTT
};
struct unf_wwpn_dfx_counter_info_s {
atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE];
atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD];
atomic64_t target_busy;
atomic64_t host_busy;
atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT];
atomic_t error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT];
atomic_t device_alloc;
atomic_t device_destroy;
};
#define UNF_MAX_LUN_PER_TARGET 256
struct unf_wwpn_rport_info_s {
unsigned long long wwpn;
struct unf_rport_s *rport; /* Rport which linkup */
void *lport; /* Lport */
unsigned int target_id; /* target_id distribute by scsi */
unsigned int last_en_scis_state;
atomic_t en_scsi_state;
struct unf_wwpn_dfx_counter_info_s *dfx_counter;
struct delayed_work loss_tmo_work;
int b_need_scan;
struct list_head fc_lun_list;
};
struct unf_rport_scsi_id_image_s {
spinlock_t scsi_image_table_lock;
/* ScsiId Wwpn table */
struct unf_wwpn_rport_info_s *wwn_rport_info_table;
unsigned int max_scsi_id;
};
enum unf_lport_dirty_flag_e {
UNF_LPORT_DIRTY_FLAG_NONE = 0,
UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100,
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200,
UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400,
UNF_LPORT_DIRTY_FLAG_BUTT
};
typedef struct unf_rport_s *(*pfn_unf_rport_set_qualifier)(
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport_by_nport_id,
struct unf_rport_s *v_rport_by_wwpn,
unsigned long long v_wwpn,
unsigned int v_sid);
typedef unsigned int (*pfn_unf_tmf_status_recovery)(void *v_rport,
void *v_xchg);
enum unf_start_work_state_e {
UNF_START_WORK_STOP,
UNF_START_WORK_BEGIN,
UNF_START_WORK_COMPLETE
};
struct unf_ini_private_info_s {
unsigned int driver_type; /* Driver Type */
void *lower; /* driver private pointer */
};
struct unf_product_hosts_info_s {
void *p_tgt_host;
unf_scsi_host_s *p_scsi_host;
struct unf_ini_private_info_s drv_private_info;
unf_scsi_host_s scsi_host;
};
struct unf_lport_s {
unsigned int port_type; /* Port Type: fc */
atomic_t lport_ref_cnt; /* LPort reference counter */
void *fc_port; /* hard adapter hba pointer */
void *rport; /* Used for SCSI interface */
void *vport;
struct unf_product_hosts_info_s host_info; /* scsi host mg */
struct unf_rport_scsi_id_image_s rport_scsi_table;
int b_port_removing;
int b_port_dir_exchange;
spinlock_t xchg_mgr_lock;
struct list_head list_xchg_mgr_head;
struct list_head list_dirty_xchg_mgr_head;
void *p_xchg_mgr[UNF_EXCHG_MGR_NUM];
enum int_e b_priority;
struct list_head list_vports_head; /* Vport Mg */
struct list_head list_intergrad_vports; /* Vport intergrad list */
struct list_head list_destroy_vports; /* Vport destroy list */
/* VPort entry, hook in list_vports_head */
struct list_head entry_vport;
struct list_head entry_lport; /* LPort entry */
spinlock_t lport_state_lock; /* UL Port Lock */
struct unf_disc_s disc; /* Disc and rport Mg */
/* rport pool; Vports share the Lport pool */
struct unf_rport_pool_s rport_pool;
struct unf_esgl_pool_s esgl_pool; /* external sgl pool */
unsigned int port_id; /* Port Management, 0x11000 etc. */
enum unf_lport_login_state_e en_states;
unsigned int link_up;
unsigned int speed;
unsigned long long node_name;
unsigned long long port_name;
unsigned long long fabric_node_name;
unsigned int nport_id;
unsigned int max_frame_size;
unsigned int ed_tov;
unsigned int ra_tov;
unsigned int rr_tov;
unsigned int options; /* ini or tgt */
unsigned int retries;
unsigned int max_retry_count;
enum unf_act_topo_e en_act_topo;
enum int_e b_switch_state; /* TRUE: ON, FALSE: OFF */
enum int_e b_bbscn_support; /* TRUE: ON, FALSE: OFF */
enum unf_start_work_state_e en_start_work_state;
/* Xchg Mg operation template */
struct unf_cm_xchg_mgr_template_s xchg_mgr_temp;
struct unf_cm_lport_template_s lport_mgr_temp;
struct unf_low_level_function_op_s low_level_func;
struct unf_event_mgr event_mgr; /* Disc and rport Mg */
struct delayed_work retry_work; /* poll work or delay work */
struct workqueue_struct *link_event_wq;
struct workqueue_struct *xchg_wq;
struct unf_err_code_s err_code_sum; /* Error code counter */
struct unf_link_service_collect_s link_service_info;
struct unf_pcie_error_count_s pcie_error_cnt;
pfn_unf_rport_set_qualifier pfn_unf_qualify_rport; /* Qualify Rport */
/* tmf marker recovery */
pfn_unf_tmf_status_recovery pfn_unf_tmf_abnormal_recovery;
struct delayed_work route_timer_work; /* L_Port timer route */
unsigned short vp_index; /* Vport Index, Lport:0 */
struct unf_vport_pool_s *vport_pool; /* Only for Lport */
void *root_lport; /* Point to physic Lport */
struct completion *lport_free_completion; /* Free LPort Completion */
#define UNF_LPORT_NOP 1
#define UNF_LPORT_NORMAL 0
atomic_t port_no_operater_flag;
unsigned int enhanced_features; /* Enhanced Features */
unsigned int destroy_step;
unsigned int dirty_flag;
struct unf_lport_sfp_info sfp_info;
struct unf_chip_manage_info_s *chip_info;
#define UNF_LOOP_BACK_TESTING 1
#define UNF_LOOP_BACK_TEST_END 0
unsigned char sfp_power_fault_count;
unsigned char sfp_9545_fault_count;
unsigned long long last_tx_fault_jif; /* SFP last tx fault jiffies */
/* Server card: UNF_FC_SERVER_BOARD_32_G(6)for 32G mode,
* UNF_FC_SERVER_BOARD_16_G(7)for 16G mode
*/
unsigned int card_type;
atomic_t scsi_session_add_success;
atomic_t scsi_session_add_failed;
atomic_t scsi_session_del_success;
atomic_t scsi_session_del_failed;
atomic_t add_start_work_failed;
atomic_t add_closing_work_failed;
atomic_t device_alloc;
atomic_t device_destroy;
atomic_t session_loss_tmo;
atomic_t alloc_scsi_id;
atomic_t resume_scsi_id;
atomic_t reuse_scsi_id;
atomic64_t last_exchg_mgr_idx;
atomic64_t exchg_index;
unsigned int pcie_link_down_cnt;
int b_pcie_linkdown;
unsigned char fw_version[HIFC_VER_LEN];
atomic_t link_lose_tmo;
atomic_t err_code_obtain_freq;
};
void unf_lport_stat_ma(struct unf_lport_s *v_lport,
enum unf_lport_event_e v_event);
void unf_lport_error_recovery(struct unf_lport_s *v_lport);
void unf_set_lport_state(struct unf_lport_s *v_lport,
enum unf_lport_login_state_e v_states);
void unf_init_portparms(struct unf_lport_s *v_lport);
unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport);
void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport);
unsigned int unf_init_disc_mgr(struct unf_lport_s *v_pst_lport);
unsigned int unf_init_lport_route(struct unf_lport_s *v_lport);
void unf_destroy_lport_route(struct unf_lport_s *v_lport);
void unf_reset_lport_params(struct unf_lport_s *v_lport);
void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport,
enum unf_lport_dirty_flag_e v_etype);
struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport,
unsigned short v_vp_index);
struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport,
unsigned int v_did);
struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport,
unsigned long long v_wwpn);
void unf_cm_vport_remove(struct unf_lport_s *v_vport);
#endif

1486
hifc/unf_npiv.c Normal file

File diff suppressed because it is too large Load Diff

49
hifc/unf_npiv.h Normal file
View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __NPIV_H__
#define __NPIV_H__
/* product VPORT configuration */
struct vport_config_s {
unsigned long long node_name;
unsigned long long port_name;
unsigned int port_mode; /* INI, TGT or both */
};
/* product Vport function */
#define PORTID_VPINDEX_MASK 0xff000000
#define PORTID_VPINDEX_SHIT 24
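/*
* Illustrative use (hypothetical variables): the top byte of a port id
* encodes the vport index, so it can be extracted as
*
* vp_index = (port_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT;
*/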
unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn);
struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport,
struct vport_config_s *v_vport_config);
unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index);
/* Vport pool create and release functions */
unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport);
void unf_free_vport_pool(struct unf_lport_s *v_lport);
/* Lport register stLPortMgTemp function */
void unf_vport_remove(void *v_vport);
void unf_vport_ref_dec(struct unf_lport_s *v_vport);
/* link down all Vports after receiving a linkdown event */
void unf_linkdown_all_vports(void *v_lport);
/* link up all Vports after the Lport receives FLOGI ACC */
void unf_linkup_all_vports(struct unf_lport_s *v_lport);
/* delete all Vports when the Lport is removed */
void unf_destroy_all_vports(struct unf_lport_s *v_lport);
void unf_vport_fabric_logo(struct unf_lport_s *v_vport);
unsigned int unf_destroy_one_vport(struct unf_lport_s *v_vport);
struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *v_lport,
unsigned long long v_wwpn);
unsigned int unf_drop_vport(struct unf_lport_s *v_vport);
void unf_link_down_one_vport(struct unf_lport_s *v_vport);
void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index);
void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id);
void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did);
void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn);
#endif

5565
hifc/unf_portman.c Normal file

File diff suppressed because it is too large Load Diff

305
hifc/unf_portman.h Normal file
View File

@ -0,0 +1,305 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_PORT_MAN_H__
#define __UNF_PORT_MAN_H__
#define UNF_LPORT_POLL_TIMER ((unsigned int)(1 * 1000))
#define UNF_MAX_BBSCN_VALUE 14
#define UNF_SAVA_INFO_MODE 0
#define UNF_CLEAN_INFO_MODE 1
#define FC_DRIVE_ACTION_CHECK(condition, fail_do0, fail_do1, return) \
do { \
if (condition) { \
fail_do0; \
fail_do1; \
return; \
} \
} while (0)
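/*
* Usage sketch (hypothetical arguments): when the condition holds, the
* macro runs both cleanup statements and then the supplied return, e.g.:
*
* FC_DRIVE_ACTION_CHECK((!lport),
* atomic_dec(&ref_cnt),
* kfree(buf),
* return UNF_RETURN_ERROR);
*/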
/* Used in hifcadm tool */
#define UNF_ENABLE_DIF_DIX_PROT 1
#define UNF_ENABLE_DIF_PROT 2
#define UNF_ENABLE_DIX_PROT 3
#define UNF_DISABLE_IP_CHECKSUM 0
#define UNF_ENABLE_IP_CHECKSUM 1
#define UNF_APP_REF_ESC_BOTH_NOT_CHECK 0
#define UNF_APP_ESC_CHECK 1
#define UNF_REF_ESC_CHECK 2
#define UNF_APP_REF_ESC_BOTH_CHECK 3
struct unf_global_card_thread_s {
struct list_head list_card_list_head;
spinlock_t global_card_list_lock;
unsigned int card_sum;
};
/* Global L_Port MG, manages all L_Ports */
struct unf_global_lport_s {
struct list_head list_lport_list_head;
/* Temporary list, used in hold-list traversal */
struct list_head list_intergrad_head;
/* Destroy list, used in card remove */
struct list_head list_destroy_head;
/* Dirty list, abnormal ports */
struct list_head list_dirty_head;
spinlock_t global_lport_list_lock;
unsigned int lport_sum;
unsigned char dft_mode;
int b_start_work;
};
struct unf_reset_port_argin {
unsigned int port_id;
};
struct unf_get_topo_argout {
unsigned int *topo_cfg;
enum unf_act_topo_e *en_act_topo;
};
struct unf_set_topo_argin {
unsigned int port_id;
unsigned int topo;
};
struct unf_set_bbscn_argin {
unsigned int port_id;
unsigned int bb_scn;
};
struct unf_set_sfp_argin {
unsigned int port_id;
int turn_on;
};
struct unf_set_speed_argin {
unsigned int port_id;
unsigned int *speed;
};
struct unf_get_sfp_argout {
unsigned int *status;
union unf_sfp_eeprome_info *sfp_info;
};
struct unf_get_allinfo_argout {
unsigned int *out_size;
unsigned int in_size;
void *out_buf;
void *in_buf;
void *lport;
};
struct unf_port_action_s {
unsigned int action;
unsigned int (*fn_unf_action)(struct unf_lport_s *v_lport,
void *v_input);
};
struct unf_hifcadm_action_s {
unsigned int hifc_action;
int (*fn_unf_hifc_action)(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input);
};
struct unf_lport_info {
#define NPIVMAX 255
unsigned int port_id;
unsigned int options;
int b_start_work;
unsigned int phy_link;
unsigned int link_up;
unsigned int act_speed;
unsigned int cfg_speed;
unsigned int tape_support;
unsigned long long port_name;
unsigned int msi;
unsigned int ini_io_retry_timeout;
unsigned int support_max_npiv_num;
unsigned int act_topo;
unsigned int port_topology;
unsigned int fc_ser_max_speed;
unsigned int loss_of_signal_count;
unsigned int bad_rx_char_count;
unsigned int loss_of_sync_count;
unsigned int link_fail_count;
unsigned int rx_eo_fa_count;
unsigned int dis_frame_count;
unsigned int bad_crc_count;
unsigned int proto_error_count;
unsigned int cfg_led_mode;
unsigned char chip_type;
unsigned char vport_num;
unsigned short rsvd1;
unsigned int vport_id[NPIVMAX];
unsigned int nport_id;
};
struct unf_admin_msg_head {
unsigned int size;
unsigned short status;
unsigned char success_num;
unsigned char rsvd;
};
#define UNF_PORT_INFO_SIZE 10
struct unf_adm_cmd {
struct unf_admin_msg_head msg_head;
unsigned int arg[UNF_PORT_INFO_SIZE];
};
struct unf_adm_xchg {
unsigned int aborted;
unsigned int ini_busy;
unsigned int tgt_busy;
unsigned int delay;
unsigned int free;
unsigned int wait;
unsigned int sfs_free;
unsigned int sfs_busy;
};
enum unf_admin_msg_status_e {
UNF_ADMIN_MSG_DONE = 0,
UNF_ADMIN_MSG_INCOMPLETE,
UNF_ADMIN_MSG_FAILED,
UNF_ADMIN_MSG_BUTT
};
/* structures defined in common with the fc unf driver */
enum fc_dfx_io_count_type_e {
FC_HOST_COUNTER = 0,
FC_HOST_SCSI_CMD_IN_TOTAL,
FC_HOST_SCSI_CMD_DONE_TOTAL,
FC_SESSION_COUNTER,
FC_SESSION_SCSI_CMD_IN,
FC_SESSION_SCSI_CMD_DONE,
FC_SRB_COUNT,
};
enum unf_msg_format_e {
UNF_PORT_SET_OP = 1,
UNF_TOPO_SET_OP,
UNF_SPEED_SET_OP,
UNF_INFO_GET_OP,
UNF_INFO_CLEAR_OP,
UNF_SFP_INFO_OP,
UNF_DFX,
UNF_FEC_SET = 8,
UNF_BBSCN,
UNF_VPORT,
UNF_LINK_DELAY = 11,
UNF_DIF,
UNF_DIF_CONFIG = 14,
UNF_SAVA_DATA,
UNF_SHOW_XCHG = 23,
UNF_PORTSTAT = 24,
UNF_ALL_INFO_OP = 25,
FC_LINK_TMO_OPT = 26,
FC_DRV_LOG_OPT = 27,
UNF_COMPAT_TEST = 0xFF
};
struct unf_save_info_head_s {
unsigned int opcode : 4;
unsigned int type : 4;
unsigned int entry_num : 8;
unsigned int next : 16;
};
enum unf_save_info_type_e {
UNF_SESSION_QOS = 0,
UNF_PORT_BASE_INFO = 2,
UNF_SAVE_TYPE_BUTT,
};
struct unf_link_tmo_opt_s {
struct unf_admin_msg_head head;
unsigned int link_opt;
int tmo_value;
unsigned int sync_all_port;
};
struct unf_log_level_opt_s {
struct unf_admin_msg_head head;
unsigned int log_opt;
unsigned int log_level;
unsigned int log_fre_qunce;
};
extern struct unf_global_lport_s global_lport_mgr;
extern struct unf_global_card_thread_s card_thread_mgr;
extern struct workqueue_struct *unf_work_queue;
struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id);
struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id);
void *unf_lport_create_and_init(
void *private_data,
struct unf_low_level_function_op_s *low_level_op);
int unf_cm_reset_port(unsigned int v_port_id);
int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on);
int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status,
union unf_sfp_eeprome_info *v_sfp_info,
unsigned int *sfp_type);
int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn);
int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo);
int unf_cm_get_port_topo(unsigned int v_port_id,
unsigned int *v_topo_cfg,
enum unf_act_topo_e *v_en_act_topo);
int unf_cm_clear_port_error_code_sum(unsigned int v_port_id);
unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events,
void *v_input);
unsigned int unf_release_local_port(void *v_lport);
void unf_lport_route_work(struct work_struct *v_work);
void unf_lport_update_topo(struct unf_lport_s *v_lport,
enum unf_act_topo_e v_enactive_topo);
void unf_lport_ref_dec(struct unf_lport_s *v_lport);
unsigned int unf_lport_refinc(struct unf_lport_s *v_lport);
void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport);
int unf_send_event(unsigned int port_id, unsigned int syn_flag,
void *argc_in, void *argc_out,
int (*p_func)(void *argc_in, void *argc_out));
void unf_port_mgmt_deinit(void);
void unf_port_mgmt_init(void);
int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id,
unsigned int *v_link_delay);
void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num);
unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport,
struct unf_err_code_s *v_fc_err_code);
int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed);
void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id);
int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input);
unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport);
unsigned int unf_cm_save_port_info(unsigned int v_port_id);
unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport);
unsigned int unf_cm_clear_flush(unsigned int v_port_id);
int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag);
unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport);
void unf_unregister_scsi_host(struct unf_lport_s *v_lport);
int unf_get_link_lose_tmo(struct unf_lport_s *v_lport);
int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out);
void unf_init_link_lose_tmo(struct unf_lport_s *v_lport);
int unf_set_link_lose_tmo_to_all(int time_out);
void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport);
unsigned int unf_lport_login(struct unf_lport_s *v_lport,
enum unf_act_topo_e v_en_act_topo);
unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport);
void unf_set_lport_removing(struct unf_lport_s *v_lport);
void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport);
void unf_disc_state_ma(struct unf_lport_s *v_lport,
enum unf_disc_event_e v_event);
unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport);
void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport);
#endif

2430
hifc/unf_rport.c Normal file

File diff suppressed because it is too large Load Diff

284
hifc/unf_rport.h Normal file
View File

@@ -0,0 +1,284 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_RPORT_H
#define __UNF_RPORT_H
#define UNF_MAX_SCSI_ID 2048
#define UNF_LOSE_TMO 30
#define UNF_RPORT_INVALID_INDEX 0xffff
/* Result codes of comparing the RSCN DISC list with the local RPort list */
#define UNF_RPORT_NEED_PROCESS 0x1
#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2
#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3
#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4
#define UNF_RPORT_NOT_NEED_PROCESS 0x5
#define UNF_ECHO_SEND_MAX_TIMES 1
extern struct unf_rport_feature_pool_s *port_fea_pool;
enum unf_rport_login_state_e {
UNF_RPORT_ST_INIT = 0x1000, /* initialized */
UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */
UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */
UNF_RPORT_ST_READY, /* ready for use */
UNF_RPORT_ST_LOGO, /* port logout sent */
UNF_RPORT_ST_CLOSING, /* being closed */
UNF_RPORT_ST_DELETE, /* port being deleted */
UNF_RPORT_ST_BUTT
};
enum unf_rport_event_e {
UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000,
UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001,
UNF_EVENT_RPORT_ENTER_PRLI = 0x9002,
UNF_EVENT_RPORT_READY = 0x9003,
UNF_EVENT_RPORT_LOGO = 0x9004,
UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005,
UNF_EVENT_RPORT_RECOVERY = 0x9006,
UNF_EVENT_RPORT_RELOGIN = 0x9007,
UNF_EVENT_RPORT_LINK_DOWN = 0x9008,
UNF_EVENT_RPORT_BUTT
};
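/*
 * The two enums above drive the rport login state machine implemented by
 * unf_rport_state_ma() (declared below); the happy path is
 * INIT -> PLOGI_WAIT -> PRLI_WAIT -> READY. The transition table below is
 * only an illustration of that progression, not the driver's actual logic,
 * which lives in unf_rport.c.
 */
/* Illustration only: a plausible happy-path transition table. */
static enum unf_rport_login_state_e
demo_next_state(enum unf_rport_login_state_e cur, enum unf_rport_event_e ev)
{
	switch (cur) {
	case UNF_RPORT_ST_INIT:
		if (ev == UNF_EVENT_RPORT_ENTER_PLOGI)
			return UNF_RPORT_ST_PLOGI_WAIT;
		break;
	case UNF_RPORT_ST_PLOGI_WAIT:
		if (ev == UNF_EVENT_RPORT_ENTER_PRLI)
			return UNF_RPORT_ST_PRLI_WAIT;
		break;
	case UNF_RPORT_ST_PRLI_WAIT:
		if (ev == UNF_EVENT_RPORT_READY)
			return UNF_RPORT_ST_READY;
		break;
	case UNF_RPORT_ST_READY:
		if (ev == UNF_EVENT_RPORT_LOGO)
			return UNF_RPORT_ST_LOGO;
		break;
	default:
		break;
	}
	return cur; /* unhandled events keep the current state */
}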
/* RPort local link state */
enum unf_port_state_e {
UNF_PORT_STATE_LINKUP = 0x1001,
UNF_PORT_STATE_LINKDOWN = 0x1002
};
enum unf_rport_reuse_flag_e {
UNF_RPORT_REUSE_ONLY = 0x1001,
UNF_RPORT_REUSE_INIT = 0x1002,
UNF_RPORT_REUSE_RECOVER = 0x1003
};
struct unf_disc_rport_s {
/* RPort entry */
struct list_head entry_rport;
unsigned int nport_id; /* Remote port NPortID */
unsigned int disc_done; /* 1:Disc done */
};
struct unf_rport_feature_pool_s {
struct list_head list_busy_head;
struct list_head list_free_head;
void *p_port_feature_pool_addr;
spinlock_t port_fea_pool_lock;
};
struct unf_rport_feature_recard_s {
struct list_head entry_feature;
unsigned long long wwpn;
unsigned int port_feature;
unsigned int reserved;
};
struct unf_os_thread_private_data_s {
struct list_head list;
spinlock_t spin_lock;
struct task_struct *thread;
unsigned int in_process;
unsigned int cpu_id;
atomic_t user_count;
};
/* Remote Port struct */
struct unf_rport_s {
unsigned int max_frame_size;
unsigned int supported_classes;
/* Dynamic Attributes */
/* Remote Port loss timeout in seconds. */
unsigned int dev_loss_tmo;
unsigned long long node_name;
unsigned long long port_name;
unsigned int nport_id; /* Remote port NPortID */
unsigned int local_nport_id;
unsigned int roles;
/* Remote port local INI state */
enum unf_port_state_e lport_ini_state;
enum unf_port_state_e last_lport_ini_state;
/* Remote port local TGT state */
enum unf_port_state_e lport_tgt_state;
enum unf_port_state_e last_lport_tgt_state;
/* Port type: FC */
unsigned int port_type;
/* RPort reference counter */
atomic_t rport_ref_cnt;
/* Pending IO count */
atomic_t pending_io_cnt;
/* RPort entry */
struct list_head entry_rport;
/* Port state; reclaim is delayed until rp_state reaches complete */
enum unf_rport_login_state_e rp_state;
unsigned int disc_done; /* 1:Disc done */
struct unf_lport_s *lport;
void *rport;
spinlock_t rport_state_lock;
/* Port attribution */
unsigned int ed_tov;
unsigned int ra_tov;
unsigned int options; /* ini or tgt */
unsigned int last_report_linkup_options;
unsigned int fcp_conf_needed; /* INI Rport send FCP CONF flag */
unsigned int tape_support_needed; /* INI tape support flag */
unsigned int retries; /* special req retry times */
unsigned int logo_retries; /* logo error recovery retry times */
unsigned int mas_retries; /* special req retry times */
/* Rport alloc jiffies */
unsigned long long rport_alloc_jifs;
void *session;
/* binding with SCSI */
unsigned int scsi_id;
/* disc list compare flag */
unsigned int rscn_position;
unsigned int rport_index;
/* RPort timer: closing status */
struct work_struct closing_work;
/* RPort timer: rport linkup */
struct work_struct start_work;
/* RPort timer: recovery */
struct delayed_work recovery_work;
/* RPort timer: TGT mode, PRLI waiting */
struct delayed_work open_work;
struct semaphore task_sema;
/* Callback invoked after the rport becomes Ready or is deleted
 * (with state ok/fail); create or free the TGT session here.
 * Input: lport, rport, state. Ready: create session;
 * delete: free session.
 */
void (*pfn_unf_rport_call_back)(void *, void *, unsigned int);
struct unf_os_thread_private_data_s *data_thread;
};
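/*
 * The pfn_unf_rport_call_back member carries the ready/delete notification
 * described in the comment above. Below is a hedged sketch of installing it
 * under the state lock; the callback body, the meaning of the state
 * argument, and the session helpers are assumptions for illustration.
 */
/* Illustration only: install a ready/delete notification callback. */
static void demo_rport_notify(void *lport, void *rport, unsigned int state)
{
	(void)lport;
	(void)rport;
	if (state == 0) {
		/* assumed "ok/ready": create the TGT session here */
	} else {
		/* assumed "fail/delete": free the TGT session here */
	}
}

static void demo_hook_rport(struct unf_rport_s *unf_rport)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
	unf_rport->pfn_unf_rport_call_back = demo_rport_notify;
	spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
}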
#define UNF_IO_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_result) \
do { \
if (likely(((v_io_result) < UNF_MAX_IO_RETURN_VALUE) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
((v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter))) { \
atomic64_inc(&(v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter->io_done_cnt[(v_io_result)]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] io return value(0x%x) or scsi_id(0x%x) is invalid", \
(v_io_result), (v_scsi_id)); \
} \
} while (0)
#define UNF_SCSI_CMD_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_MAX_SCSI_CMD) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
((v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter))) { \
atomic64_inc(&(v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter->scsi_cmd_cnt[(v_io_type)]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \
(v_io_type), (v_scsi_id)); \
} \
} while (0)
#define UNF_SCSI_ERROR_HANDLE_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
((v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter))) { \
atomic_inc(&(v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter->error_handle[(v_io_type)]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] error handle type(0x%x) or scsi_id(0x%x) is invalid", \
(v_io_type), (v_scsi_id)); \
} \
} while (0)
#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
((v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter))) { \
atomic_inc(&(v_scsi_table)->wwn_rport_info_table[(v_scsi_id)].dfx_counter->error_handle_result[(v_io_type)]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] error handle type(0x%x) or scsi_id(0x%x) is invalid", \
(v_io_type), (v_scsi_id)); \
} \
} while (0)
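/*
 * Each DFX macro above validates the index, the per-wwn table, and the
 * counter pointer before incrementing, so call sites need no guards of
 * their own. A minimal call-site sketch follows; struct unf_scsi_table_s
 * is assumed here as the table type behind wwn_rport_info_table.
 */
/* Hypothetical call site: account one completed I/O. Out-of-range indices
 * or a missing counter table degrade to an error trace instead of a NULL
 * dereference.
 */
static void demo_account_io(struct unf_scsi_table_s *scsi_table,
			    unsigned int scsi_id, unsigned int io_result)
{
	UNF_IO_RESULT_CNT(scsi_table, scsi_id, io_result);
}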
void unf_rport_state_ma(struct unf_rport_s *v_rport,
enum unf_rport_event_e v_event);
void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int rport_att);
void unf_rport_enter_closing(struct unf_rport_s *v_rport);
void unf_clean_linkdown_rport(struct unf_lport_s *v_lport);
void unf_rport_error_recovery(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport,
unsigned int nport_id);
void unf_rport_enter_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport);
void unf_rport_ref_dec(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_rport_set_qualifier_key_reuse(
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport_by_nport_id,
struct unf_rport_s *v_rport_by_wwpn,
unsigned long long v_wwpn,
unsigned int v_sid);
void unf_rport_delay_login(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport,
unsigned long long v_wwpn,
unsigned int v_sid);
void unf_rport_linkdown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
enum unf_rport_reuse_flag_e v_reuse_flag,
unsigned int v_nport_id);
void *unf_rport_get_free_and_init(void *v_lport,
unsigned int v_port_type,
unsigned int v_nport_id);
unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport,
unsigned int v_scsi_id);
void unf_schedule_closing_work(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
void unf_sesion_loss_timeout(struct work_struct *v_work);
unsigned int unf_get_port_feature(unsigned long long v_wwpn);
void unf_update_port_feature(unsigned long long v_wwpn,
unsigned int v_port_feature);
#endif

1578
hifc/unf_scsi.c Normal file

File diff suppressed because it is too large Load Diff

1136
hifc/unf_scsi_common.h Normal file

File diff suppressed because it is too large Load Diff

9875
hifc/unf_service.c Normal file

File diff suppressed because it is too large Load Diff

83
hifc/unf_service.h Normal file
View File

@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_SERVICE_H__
#define __UNF_SERVICE_H__
extern unsigned int max_frame_size;
#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \
(((unsigned int)(v_els_cmd) << 16) | ELS_ACC)
#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \
(((unsigned int)(v_els_cmd) << 16) | ELS_RJT)
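/*
 * Each macro packs the ELS command code into the upper 16 bits and the
 * ACC or RJT verdict into the low bits, so one 32-bit value identifies
 * both the command and its disposition. A worked example follows; the
 * ELS_PLOGI and ELS_ACC values used are the standard FC-LS codes and are
 * assumed to match the driver's definitions.
 */
/* Assuming ELS_PLOGI == 0x03 and ELS_ACC == 0x02 (FC-LS codes):
 *   UNF_SET_ELS_ACC_TYPE(ELS_PLOGI)
 *     == (((unsigned int)0x03 << 16) | 0x02)
 *     == 0x00030002   ("PLOGI accepted")
 */
unsigned int demo_rsp = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI);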
unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_gff_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_flogi(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_fdisc(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_plogi(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_prli(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_receive_els_pkg(void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
unsigned int unf_send_rff_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_rft_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_echo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int *v_time);
unsigned int unf_send_abts(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg);
unsigned int unf_send_scr(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_rrq(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_receive_bls_pkg(void *v_lport,
struct unf_frame_pkg_s *v_pkg);
struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport,
unsigned int v_rport_nport_id,
unsigned long long v_port_name);
void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int alpa);
unsigned int unf_receive_gs_pkg(void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport,
unsigned int v_nport_id);
void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport,
unsigned int v_nport_id);
unsigned int unf_release_rport_res(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport);
unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg);
unsigned int unf_send_rec(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
typedef int (*unf_evt_task)(void *v_arg_in, void *v_arg_out);
#endif /* __UNF_SERVICE_H__ */