diff --git a/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst b/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst new file mode 100644 index 00000000000000..2bf2856bc0f8db --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst @@ -0,0 +1,67 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================================================== +Linux Kernel Driver for 3SNIC Intelligent NIC family +==================================================== + +Contents +======== + +- `Overview`_ +- `Supported PCI vendor ID/device IDs`_ +- `Supported features`_ +- `Product specification`_ +- `Support`_ + +Overview +======== +SSSNIC is a network interface card that can meet the demands of a range +of application scenarios, such as data centers, cloud computing and the +financial industry. + +SSSNIC deployments are built mainly around servers and switches. The +3S910, 3S920 and 3S930 are standard PCIe cards for servers and provide +the servers with additional external network interfaces. + +The driver supports a range of link speeds: 100GE (40GE compatible) and +25GE (10GE compatible). A negotiated and extendable feature set is also +supported. + +Supported PCI vendor ID/device IDs +=================================== + +1f3f:9020 - SSSNIC PF + +Supported features +=================== + +1. Support single-root I/O virtualization (SR-IOV) +2. Support virtual machine multi queue (VMMQ) +3. Support receive side scaling (RSS) +4. Support physical function (PF) passthrough to VMs +5. Support PF promiscuous mode, unicast or multicast MAC filtering, and +   all-multicast mode +6. Support IPv4/IPv6, checksum offload, TCP Segmentation Offload (TSO), and +   Large Receive Offload (LRO) +7. Support in-band one-click log collection +8. Support loopback tests +9. Support port location indicators + +Product specification +===================== + + =================== ======= ============================= =============================================== + PCI ID (pci.ids) OEM Product PCIe port + =================== ======= ============================= =============================================== + 1F3F:9020 3SNIC 3S910(2 x 25GE SFP28 ports) PCIe Gen3 x8 (compatible with Gen2/Gen1) + 1F3F:9020 3SNIC 3S920(4 x 25GE SFP28 ports) PCIe Gen4 x16, compatible with Gen3/Gen2/Gen1 + 1F3F:9020 3SNIC 3S930(2 x 100GE QSFP28 ports) PCIe Gen4 x16, compatible with Gen3/Gen2/Gen1 + =================== ======= ============================= =============================================== + + +Support +======= + +If an issue is identified with the released source code on a supported kernel +with a supported adapter, submit the specific information related to the issue +via https://www.3snic.com.
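+
+Before reporting an issue, a quick sanity check (assuming the 1f3f:9020 ID
+listed above and the ``sssnic`` module name from the driver Kconfig) is to
+confirm that the adapter is visible on the PCI bus and that the module loads::
+
+    # lspci -d 1f3f:9020
+    # modprobe sssnic
+    # dmesg | grep sssnic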
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 6fc1961492b772..a2c5b8fbe52639 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -12,6 +12,7 @@ Contents: 3com/3c509 3com/vortex + 3snic/sssnic.rst amazon/ena altera/altera_tse amd/pds_core diff --git a/MAINTAINERS b/MAINTAINERS index c4de9d5053cb35..0ff07dad542b32 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18743,10 +18743,8 @@ F: sound/soc/pxa/ ARM/PHYTIUM SOC SUPPORT M: Wang Yinfeng S: Maintained -W: https://gerrit.b.cpu.ac/c/linux -F: Documentation/devicetree/bindings/gpio/phytium,gpio.yaml -F: arch/arm64/boot/dts/phytium/* -F: drivers/gpio/gpio-phytium* +W: https://www.phytium.com.cn +F: drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c QAT DRIVER M: Giovanni Cabiddu diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index 63912e39f94e1f..d6d19b95f2d7d8 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -884,12 +884,15 @@ CONFIG_MTD_ONENAND_2X_PROGRAM=y CONFIG_MTD_RAW_NAND=m CONFIG_MTD_NAND_DENALI_PCI=m CONFIG_MTD_NAND_DENALI_DT=m +CONFIG_MTD_NAND_OMAP2=m +CONFIG_MTD_NAND_OMAP_BCH=y CONFIG_MTD_NAND_CAFE=m CONFIG_MTD_NAND_PHYTIUM_PCI=m CONFIG_MTD_NAND_PHYTIUM_PLAT=m CONFIG_MTD_NAND_MARVELL=m CONFIG_MTD_NAND_BRCMNAND=m CONFIG_MTD_NAND_BRCMNAND_BCM63XX=m +CONFIG_MTD_NAND_GPMI_NAND=m CONFIG_MTD_NAND_FSL_IFC=m CONFIG_MTD_NAND_MXC=m CONFIG_MTD_NAND_SUNXI=m @@ -1305,14 +1308,22 @@ CONFIG_TYPHOON=m CONFIG_ADAPTEC_STARFIRE=m CONFIG_ET131X=m CONFIG_SLICOSS=m +CONFIG_SUN4I_EMAC=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y CONFIG_ALTERA_TSE=m -# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_ENA_ETHERNET=m CONFIG_AMD8111_ETH=m CONFIG_PCNET32=m CONFIG_AMD_XGBE=m CONFIG_AMD_XGBE_DCB=y +CONFIG_PDS_CORE=m +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_AQTION=m +CONFIG_EMAC_ROCKCHIP=m +CONFIG_SPI_AX88796C=m +CONFIG_SPI_AX88796C_COMPRESSION=y CONFIG_ATL2=m CONFIG_ATL1=m CONFIG_ATL1E=m @@ -1323,6 +1334,8 @@ CONFIG_BCMGENET=m CONFIG_TIGON3=m CONFIG_BNX2X=m CONFIG_SYSTEMPORT=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y CONFIG_MACB=m CONFIG_MACB_PCI=m CONFIG_THUNDER_NIC_PF=m @@ -1334,9 +1347,12 @@ CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T4_DCB=y CONFIG_CHELSIO_T4_FCOE=y CONFIG_CHELSIO_T4VF=m +CONFIG_CRYPTO_DEV_CHELSIO_TLS=m CONFIG_CHELSIO_IPSEC_INLINE=m +CONFIG_CHELSIO_TLS_DEVICE=m CONFIG_ENIC=m CONFIG_GEMINI_ETHERNET=m +CONFIG_DM9051=m CONFIG_DNET=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m @@ -1350,10 +1366,22 @@ CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_DL2K=m CONFIG_SUNDANCE=m +CONFIG_TSNEP=m CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m +CONFIG_FEC=m +CONFIG_FSL_FMAN=m +CONFIG_FSL_XGMAC_MDIO=m +CONFIG_GIANFAR=m +CONFIG_FSL_DPAA2_SWITCH=m +CONFIG_FSL_ENETC=m +CONFIG_FSL_ENETC_VF=m +CONFIG_FSL_ENETC_QOS=y +CONFIG_FUN_ETH=m +CONFIG_GVE=m CONFIG_HIX5HD2_GMAC=m CONFIG_HISI_FEMAC=m CONFIG_HIP04_ETH=m +CONFIG_HI13X1_GMAC=y CONFIG_HNS_DSAF=m CONFIG_HNS_ENET=m CONFIG_HNS3=m @@ -1373,43 +1401,70 @@ CONFIG_I40E_DCB=y CONFIG_I40EVF=m CONFIG_ICE=m CONFIG_FM10K=m +CONFIG_IGC=m CONFIG_JME=m -CONFIG_MVMDIO=m +CONFIG_ADIN1110=m +CONFIG_LITEX_LITEETH=m +CONFIG_MVNETA=m +CONFIG_MVPP2=m +CONFIG_MVPP2_PTP=y +CONFIG_PXA168_ETH=m CONFIG_SKGE=m CONFIG_SKGE_GENESIS=y CONFIG_SKY2=m +CONFIG_OCTEONTX2_AF=m +CONFIG_OCTEONTX2_PF=m +CONFIG_OCTEONTX2_VF=m +CONFIG_OCTEON_EP=m +CONFIG_PRESTERA=m 
+CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NET_MEDIATEK_SOC=m +CONFIG_NET_MEDIATEK_STAR_EMAC=m CONFIG_MLX4_EN=m # CONFIG_MLX4_DEBUG is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_MACSEC=y +CONFIG_MLX5_EN_IPSEC=y +CONFIG_MLX5_EN_TLS=y +CONFIG_MLX5_SF=y CONFIG_MLXSW_CORE=m +CONFIG_MLXBF_GIGE=m +CONFIG_KS8842=m +CONFIG_KS8851=m CONFIG_KS8851_MLL=m CONFIG_KSZ884X_PCI=m CONFIG_ENC28J60=m CONFIG_ENC28J60_WRITEVERIFY=y CONFIG_ENCX24J600=m CONFIG_LAN743X=m +CONFIG_LAN966X_SWITCH=m +CONFIG_SPARX5_SWITCH=m CONFIG_YT6801=m CONFIG_MSCC_OCELOT_SWITCH=m CONFIG_MYRI10GE=m CONFIG_FEALNX=m +CONFIG_NI_XGE_MANAGEMENT_ENET=m CONFIG_NATSEMI=m CONFIG_NS83820=m CONFIG_S2IO=m -# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_NFP=m CONFIG_NE2K_PCI=m CONFIG_FORCEDETH=m CONFIG_ETHOC=m CONFIG_HAMACHI=m CONFIG_YELLOWFIN=m +CONFIG_IONIC=m CONFIG_QLA3XXX=m CONFIG_QLCNIC=m CONFIG_NETXEN_NIC=m CONFIG_QED=m CONFIG_QEDE=m CONFIG_BNA=m +CONFIG_QCA7000_SPI=m +CONFIG_QCA7000_UART=m CONFIG_QCOM_EMAC=m CONFIG_RMNET=m CONFIG_R6040=m @@ -1420,6 +1475,9 @@ CONFIG_8139TOO_TUNE_TWISTER=y CONFIG_8139TOO_8129=y CONFIG_8139_OLD_RX_RESET=y CONFIG_R8169=m +CONFIG_SH_ETH=m +CONFIG_RAVB=m +CONFIG_RENESAS_ETHER_SWITCH=m CONFIG_ROCKER=m CONFIG_SXGBE_ETH=m CONFIG_SC92031=m @@ -1427,10 +1485,13 @@ CONFIG_SIS900=m CONFIG_SIS190=m CONFIG_SFC=m CONFIG_SFC_FALCON=m +CONFIG_SFC_SIENA=m CONFIG_SMC91X=m CONFIG_EPIC100=m CONFIG_SMSC911X=m CONFIG_SMSC9420=m +CONFIG_SNI_AVE=m +CONFIG_SNI_NETSEC=m CONFIG_STMMAC_ETH=y CONFIG_DWMAC_DWC_QOS_ETH=m CONFIG_DWMAC_MEDIATEK=m @@ -1445,7 +1506,13 @@ CONFIG_NIU=m CONFIG_DWC_XLGMAC=m CONFIG_DWC_XLGMAC_PCI=m CONFIG_TEHUTI=m +CONFIG_TI_CPSW_PHY_SEL=y +CONFIG_TI_K3_AM65_CPSW_NUSS=m +CONFIG_TI_K3_AM65_CPSW_SWITCHDEV=y +CONFIG_TI_K3_AM65_CPTS=m +CONFIG_TI_AM65_CPSW_TAS=y CONFIG_TLAN=m +CONFIG_MSE102X=m CONFIG_VIA_RHINE=m CONFIG_VIA_RHINE_MMIO=y CONFIG_VIA_VELOCITY=m @@ -1453,10 +1520,14 @@ CONFIG_NGBE=m CONFIG_TXGBE=m CONFIG_WIZNET_W5100=m CONFIG_WIZNET_W5300=m -CONFIG_WIZNET_BUS_DIRECT=y +CONFIG_WIZNET_W5100_SPI=m +CONFIG_XILINX_EMACLITE=m +CONFIG_XILINX_AXI_EMAC=m +CONFIG_XILINX_LL_TEMAC=m CONFIG_PHYTMAC=m CONFIG_PHYTMAC_PLATFORM=m CONFIG_PHYTMAC_PCI=m +CONFIG_GRTNIC=m CONFIG_FDDI=m CONFIG_DEFXX=m CONFIG_SKFP=m @@ -1522,7 +1593,6 @@ CONFIG_CAN_KVASER_USB=m CONFIG_CAN_MCBA_USB=m CONFIG_CAN_PEAK_USB=m CONFIG_CAN_UCAN=m -CONFIG_MDIO_BITBANG=m CONFIG_MDIO_GPIO=m CONFIG_MDIO_HISI_FEMAC=y CONFIG_MDIO_MSCC_MIIM=m @@ -3793,6 +3863,12 @@ CONFIG_EXTCON_PALMAS=m CONFIG_EXTCON_RT8973A=m CONFIG_EXTCON_SM5502=m CONFIG_ARM_PL172_MPMC=m +CONFIG_BRCMSTB_DPFE=m +CONFIG_BRCMSTB_MEMC=m +CONFIG_OMAP_GPMC=m +CONFIG_FSL_IFC=y +CONFIG_RENESAS_RPCIF=m +CONFIG_STM32_FMC2_EBI=m CONFIG_IIO=m CONFIG_IIO_BUFFER_CB=m CONFIG_IIO_BUFFER_DMAENGINE=m @@ -3818,6 +3894,7 @@ CONFIG_PHYTIUM_IXIC=y CONFIG_IPACK_BUS=m CONFIG_BOARD_TPCI200=m CONFIG_SERIAL_IPOCTAL=m +# CONFIG_RESET_MCHP_SPARX5 is not set CONFIG_BCM_KONA_USB2_PHY=m CONFIG_PHY_HI6220_USB=m CONFIG_PHY_HISTB_COMBPHY=m diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index 24341e149dcae2..ad9a3c14146225 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1452,6 +1452,7 @@ CONFIG_XILINX_EMACLITE=m CONFIG_XILINX_AXI_EMAC=m CONFIG_XILINX_LL_TEMAC=m CONFIG_PCMCIA_XIRC2PS=m +CONFIG_GRTNIC=m CONFIG_FDDI=m CONFIG_DEFXX=m CONFIG_SKFP=m @@ -5669,8 +5670,6 @@ CONFIG_MSDOS_FS=y 
CONFIG_VFAT_FS=y CONFIG_FAT_DEFAULT_UTF8=y CONFIG_EXFAT_FS=y -CONFIG_NTFS_FS=m -CONFIG_NTFS_RW=y CONFIG_NTFS3_FS=y CONFIG_NTFS3_LZX_XPRESS=y CONFIG_NTFS3_FS_POSIX_ACL=y diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index b5520b60edd419..b210edc9a3d6fd 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -1246,7 +1246,10 @@ CONFIG_PCNET32=m CONFIG_PCMCIA_NMCLAN=m CONFIG_AMD_XGBE=m CONFIG_AMD_XGBE_DCB=y +CONFIG_PDS_CORE=m CONFIG_AQTION=m +CONFIG_SPI_AX88796C=m +CONFIG_SPI_AX88796C_COMPRESSION=y CONFIG_ATL2=m CONFIG_ATL1=m CONFIG_ATL1E=m @@ -1284,7 +1287,9 @@ CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m CONFIG_DL2K=m CONFIG_SUNDANCE=m +CONFIG_TSNEP=m CONFIG_PCMCIA_FMVJ18X=m +CONFIG_FUN_ETH=m CONFIG_GVE=m CONFIG_HINIC=m CONFIG_E100=m @@ -1313,8 +1318,10 @@ CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_MACSEC=y CONFIG_MLX5_EN_IPSEC=y CONFIG_MLX5_EN_TLS=y +CONFIG_MLX5_SF=y CONFIG_MLXSW_CORE=m CONFIG_KS8842=m CONFIG_KS8851=m @@ -1323,7 +1330,9 @@ CONFIG_KSZ884X_PCI=m CONFIG_ENC28J60=m CONFIG_ENCX24J600=m CONFIG_LAN743X=m +CONFIG_VCAP=y CONFIG_YT6801=m +CONFIG_MICROSOFT_MANA=m CONFIG_MYRI10GE=m CONFIG_FEALNX=m CONFIG_NI_XGE_MANAGEMENT_ENET=m @@ -1351,6 +1360,7 @@ CONFIG_R6040=m CONFIG_ATP=m CONFIG_8139CP=m CONFIG_8139TOO=m +CONFIG_8139TOO_TUNE_TWISTER=y CONFIG_8139TOO_8129=y CONFIG_R8169=m CONFIG_ROCKER=m @@ -1360,11 +1370,14 @@ CONFIG_SIS900=m CONFIG_SIS190=m CONFIG_SFC=m CONFIG_SFC_FALCON=m +CONFIG_SFC_SIENA=m +CONFIG_SFC_SIENA_SRIOV=y CONFIG_PCMCIA_SMC91C92=m CONFIG_EPIC100=m CONFIG_SMSC911X=m CONFIG_SMSC9420=m CONFIG_STMMAC_ETH=m +CONFIG_DWMAC_PHYTIUM=m CONFIG_STMMAC_PCI=m CONFIG_HAPPYMEAL=m CONFIG_SUNGEM=m @@ -1374,6 +1387,7 @@ CONFIG_DWC_XLGMAC=m CONFIG_DWC_XLGMAC_PCI=m CONFIG_TEHUTI=m CONFIG_TLAN=m +CONFIG_MSE102X=m CONFIG_VIA_RHINE=m CONFIG_VIA_RHINE_MMIO=y CONFIG_VIA_VELOCITY=m @@ -1382,9 +1396,12 @@ CONFIG_TXGBE=m CONFIG_WIZNET_W5100=m CONFIG_WIZNET_W5300=m CONFIG_WIZNET_W5100_SPI=m +CONFIG_XILINX_EMACLITE=m CONFIG_XILINX_AXI_EMAC=m CONFIG_XILINX_LL_TEMAC=m CONFIG_PCMCIA_XIRC2PS=m +# CONFIG_NET_VENDOR_PHYTIUM is not set +CONFIG_GRTNIC=m CONFIG_FDDI=y CONFIG_DEFXX=m CONFIG_SKFP=m diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c index b5852e69ec6a4d..244b331cecd792 100644 --- a/arch/x86/crypto/sm3-zhaoxin-gmi.c +++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/crypto/sm4_zhaoxin_gmi.c b/arch/x86/crypto/sm4_zhaoxin_gmi.c index a0645fafe6af44..dd5713af7048dc 100644 --- a/arch/x86/crypto/sm4_zhaoxin_gmi.c +++ b/arch/x86/crypto/sm4_zhaoxin_gmi.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -776,7 +776,7 @@ static int __init gmi_sm4_init(void) algname = sm4_algs[i].base.cra_name + 2; drvname = sm4_algs[i].base.cra_driver_name + 2; basename = sm4_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(sm4_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 25a79a1e13e84d..d5694412935a2f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -146,6 +146,8 @@ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 
0xC0000001, word 5 */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_CCS ( 5*32+ 4) /* "sm3 sm4" present */ +#define X86_FEATURE_CCS_EN ( 5*32+ 5) /* "sm3_en sm4_en" enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */ @@ -154,6 +156,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */ +#define X86_FEATURE_ZX_FMA ( 5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX ( 5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN ( 5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS ( 5*32+18) /* Overstress for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN ( 5*32+19) /* Overstress for auto overclock enabled */ +#define X86_FEATURE_TM3 ( 5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN ( 5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 ( 5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN ( 5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM ( 5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 ( 5*32+25) /* SHA384 and SHA 512 present */ +#define X86_FEATURE_PHE2_EN ( 5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX ( 5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN ( 5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX ( 5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN ( 5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK ( 5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0ba285e1c76b91..2ddc3c54ebf61f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -331,6 +331,7 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, + MSR_ZX_PAUSE_CONTROL, MSR_IA32_XFD, MSR_IA32_XFD_ERR, }; @@ -1617,179 +1618,6 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); -/* - * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track - * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS, - * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that - * require host support, i.e. should be probed via RDMSR. emulated_msrs holds - * MSRs that KVM emulates without strictly requiring host support. - * msr_based_features holds MSRs that enumerate features, i.e. are effectively - * CPUID leafs. Note, msr_based_features isn't mutually exclusive with - * msrs_to_save and emulated_msrs. 
- */ - -static const u32 msrs_to_save_base[] = { - MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, - MSR_STAR, -#ifdef CONFIG_X86_64 - MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, -#endif - MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, - MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL, - MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, - MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, - MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, - MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, - MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, - MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, - MSR_IA32_UMWAIT_CONTROL, - MSR_ZX_PAUSE_CONTROL, - - MSR_IA32_XFD, MSR_IA32_XFD_ERR, -}; - -static const u32 msrs_to_save_pmu[] = { - MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, - MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, - MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, - MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, - - /* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */ - MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, - MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, - MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, - MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, - MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, - MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, - MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, - MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, - - MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, - MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, - - /* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. 
*/ - MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, - MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, - MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, - MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, - - MSR_AMD64_PERF_CNTR_GLOBAL_CTL, - MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, - MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, -}; - -static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + - ARRAY_SIZE(msrs_to_save_pmu)]; -static unsigned num_msrs_to_save; - -static const u32 emulated_msrs_all[] = { - MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, - -#ifdef CONFIG_KVM_HYPERV - HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, - HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, - HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, - HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, - HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, - HV_X64_MSR_RESET, - HV_X64_MSR_VP_INDEX, - HV_X64_MSR_VP_RUNTIME, - HV_X64_MSR_SCONTROL, - HV_X64_MSR_STIMER0_CONFIG, - HV_X64_MSR_VP_ASSIST_PAGE, - HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, - HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, - HV_X64_MSR_SYNDBG_OPTIONS, - HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, - HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, - HV_X64_MSR_SYNDBG_PENDING_BUFFER, -#endif - - MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, - MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, - - MSR_IA32_TSC_ADJUST, - MSR_IA32_TSC_DEADLINE, - MSR_IA32_ARCH_CAPABILITIES, - MSR_IA32_PERF_CAPABILITIES, - MSR_IA32_MISC_ENABLE, - MSR_IA32_MCG_STATUS, - MSR_IA32_MCG_CTL, - MSR_IA32_MCG_EXT_CTL, - MSR_IA32_SMBASE, - MSR_SMI_COUNT, - MSR_PLATFORM_INFO, - MSR_MISC_FEATURES_ENABLES, - MSR_AMD64_VIRT_SPEC_CTRL, - MSR_AMD64_TSC_RATIO, - MSR_IA32_POWER_CTL, - MSR_IA32_UCODE_REV, - - /* - * KVM always supports the "true" VMX control MSRs, even if the host - * does not. The VMX MSRs as a whole are considered "emulated" as KVM - * doesn't strictly require them to exist in the host (ignoring that - * KVM would refuse to load in the first place if the core set of MSRs - * aren't supported). - */ - MSR_IA32_VMX_BASIC, - MSR_IA32_VMX_TRUE_PINBASED_CTLS, - MSR_IA32_VMX_TRUE_PROCBASED_CTLS, - MSR_IA32_VMX_TRUE_EXIT_CTLS, - MSR_IA32_VMX_TRUE_ENTRY_CTLS, - MSR_IA32_VMX_MISC, - MSR_IA32_VMX_CR0_FIXED0, - MSR_IA32_VMX_CR4_FIXED0, - MSR_IA32_VMX_VMCS_ENUM, - MSR_IA32_VMX_PROCBASED_CTLS2, - MSR_IA32_VMX_EPT_VPID_CAP, - MSR_IA32_VMX_VMFUNC, - - MSR_K7_HWCR, - MSR_KVM_POLL_CONTROL, -}; - -static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; -static unsigned num_emulated_msrs; - -/* - * List of MSRs that control the existence of MSR-based features, i.e. MSRs - * that are effectively CPUID leafs. VMX MSRs are also included in the set of - * feature MSRs, but are handled separately to allow expedited lookups. - */ -static const u32 msr_based_features_all_except_vmx[] = { - MSR_AMD64_DE_CFG, - MSR_IA32_UCODE_REV, - MSR_IA32_ARCH_CAPABILITIES, - MSR_IA32_PERF_CAPABILITIES, -}; - -static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + - (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; -static unsigned int num_msr_based_features; - -/* - * All feature MSRs except uCode revID, which tracks the currently loaded uCode - * patch, are immutable once the vCPU model is defined. 
- */ -static bool kvm_is_immutable_feature_msr(u32 msr) -{ - int i; - - if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR) - return true; - - for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { - if (msr == msr_based_features_all_except_vmx[i]) - return msr != MSR_IA32_UCODE_REV; - } - - return false; -} - /* * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM * does not yet virtualize. These include: diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c index f49d4ffc9f3c7d..f4d1ef72e63ce1 100644 --- a/drivers/crypto/montage/tsse/tsse_vuart.c +++ b/drivers/crypto/montage/tsse/tsse_vuart.c @@ -74,24 +74,6 @@ static void vuart_serial_out(struct uart_port *port, int offset, int value) writel(value, port->membase + offset); } -static void vuart_wait_for_xmitr(struct uart_port *port) -{ - unsigned int status, tmout = 10000; - - for (;;) { - status = vuart_serial_in(port, VUART_FSR); - if (FIELD_GET(VUART_FSR_TXFIFOE, status)) - break; - if (--tmout == 0) { - pr_err("%s:timeout(10ms), TX is not empty.\n", - __func__); - break; - } - udelay(1); - touch_nmi_watchdog(); - } -} - static unsigned int vuart_tx_empty(struct uart_port *port) { unsigned long flags; @@ -128,7 +110,7 @@ static void vuart_stop_tx(struct uart_port *port) static void vuart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct tsse_vuart *vuart = (struct tsse_vuart *)port; int count; @@ -137,21 +119,25 @@ static void vuart_tx_chars(struct uart_port *port) return; } - if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { + if (uart_tx_stopped(port) || kfifo_is_empty(&tport->xmit_fifo)) { vuart_stop_tx(port); return; } count = vuart->tx_loadsz; do { - vuart_serial_out(port, VUART_TX, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + unsigned char c; + + if (!uart_fifo_get(port, &c)) + break; + + vuart_serial_out(port, UART_TX, c); port->icount.tx++; - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) break; } while (--count > 0); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } diff --git a/drivers/edac/ieh_edac.c b/drivers/edac/ieh_edac.c index 6c92352091b153..5539aa03a4d916 100644 --- a/drivers/edac/ieh_edac.c +++ b/drivers/edac/ieh_edac.c @@ -536,8 +536,8 @@ static struct notifier_block ieh_mce_dec = { }; static const struct x86_cpu_id ieh_cpuids[] = { - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_u_cfg), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_h_cfg), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_u_cfg), + X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_h_cfg), {} }; MODULE_DEVICE_TABLE(x86cpu, ieh_cpuids); diff --git a/drivers/edac/phytium_edac.c b/drivers/edac/phytium_edac.c index 6b85e5dd037096..b661dd8dabedce 100644 --- a/drivers/edac/phytium_edac.c +++ b/drivers/edac/phytium_edac.c @@ -246,8 +246,8 @@ static int phytium_edac_device_add(struct phytium_edac *edac) edac_dev = edac_device_alloc_ctl_info( sizeof(struct edac_device_ctl_info), - "ras", 1, "soc", 1, 0, NULL, - 0, edac_device_alloc_index()); + "ras", 1, "soc", 1, 0, + edac_device_alloc_index()); if (!edac_dev) res = -ENOMEM; diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c index fdc0c5ba5e0c1b..52caff040a2ad1 100644 --- a/drivers/hwmon/zhaoxin-cputemp.c +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -184,13 +184,12 @@ static int 
zhaoxin_cputemp_probe(struct platform_device *pdev) return err; } -static int zhaoxin_cputemp_remove(struct platform_device *pdev) +static void zhaoxin_cputemp_remove(struct platform_device *pdev) { struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); - return 0; } static struct platform_driver zhaoxin_cputemp_driver = { @@ -198,7 +197,7 @@ static struct platform_driver zhaoxin_cputemp_driver = { .name = DRVNAME, }, .probe = zhaoxin_cputemp_probe, - .remove = zhaoxin_cputemp_remove, + .remove_new = zhaoxin_cputemp_remove, }; struct pdev_entry { @@ -330,4 +329,4 @@ MODULE_LICENSE("GPL"); module_init(zhaoxin_cputemp_init) module_exit(zhaoxin_cputemp_exit) -MODULE_IMPORT_NS(HWMON_THERMAL); \ No newline at end of file +MODULE_IMPORT_NS(HWMON_THERMAL); diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c index b1f749bcc71642..39832e8a6dcee6 100644 --- a/drivers/i2c/busses/i2c-zhaoxin-smbus.c +++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c @@ -349,15 +349,13 @@ static int zxsmb_probe(struct platform_device *pdev) return i2c_add_adapter(&smb->adap); } -static int zxsmb_remove(struct platform_device *pdev) +static void zxsmb_remove(struct platform_device *pdev) { struct zxsmb *smb = platform_get_drvdata(pdev); i2c_del_adapter(&(smb->adap)); platform_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, smb); - - return 0; } static const struct acpi_device_id zxsmb_acpi_match[] = { @@ -368,7 +366,7 @@ MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match); static struct platform_driver zxsmb_driver = { .probe = zxsmb_probe, - .remove = zxsmb_remove, + .remove_new = zxsmb_remove, .driver = { .name = ZXSMB_NAME, .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c index ac1d16810e87a8..78fd03ef068115 100644 --- a/drivers/irqchip/irq-gic-phytium-2500.c +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -658,14 +658,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type) if (gic_irq_in_rdist(d)) { base = gic_data_rdist_sgi_base(); - ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp); + ret = gic_configure_irq(index, type, base + offset); + gic_redist_wait_for_rwp(); mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); if ((mpidr & 0xffff) == 0) { rbase = base + 64*SZ_128K; for (i = 0; i < 4; i++) { - ret = gic_configure_irq(index, type, rbase + offset, NULL); + ret = gic_configure_irq(index, type, rbase + offset); gic_do_wait_for_rwp(rbase - SZ_64K, GICR_CTLR_RWP); rbase = rbase + SZ_128K; } @@ -673,7 +674,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) } else { skt = mars3_irq_to_skt(gic_irq(d)); base = mars3_gic_dists[skt].dist_base; - ret = gic_configure_irq(index, type, base + offset, NULL); + ret = gic_configure_irq(index, type, base + offset); gic_do_wait_for_rwp(base, GICD_CTLR_RWP); } @@ -944,10 +945,10 @@ static void __init gic_dist_init(void) writel_relaxed(0, base + GICD_ICFGRnE + i / 4); for (i = 0; i < GIC_ESPI_NR; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); + writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI), base + GICD_IPRIORITYRnE + i); /* Now do the common stuff, and wait for the distributor to drain */ - gic_dist_config(base, GIC_LINE_NR, NULL); + gic_dist_config(base, GIC_LINE_NR, GICD_INT_DEF_PRI); gic_do_wait_for_rwp(base, GICD_CTLR_RWP); // do sync outside of 
gic_dist_config val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; @@ -1294,7 +1295,8 @@ static void gic_cpu_init(void) for (i = 0; i < gic_data.ppi_nr + 16; i += 32) writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); - gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); + gic_cpu_config(rbase, gic_data.ppi_nr + 16, GICD_INT_DEF_PRI); + gic_redist_wait_for_rwp(); mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); @@ -1305,7 +1307,7 @@ static void gic_cpu_init(void) /* Configure SGIs/PPIs as non-secure Group-1 */ writel_relaxed(~0, rbase + GICR_IGROUPR0); - gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL); + gic_cpu_config(rbase, gic_data.ppi_nr + 16, GICD_INT_DEF_PRI); gic_do_wait_for_rwp(rbase - SZ_64K, GICR_CTLR_RWP); rbase = rbase + SZ_128K; diff --git a/drivers/net/ethernet/3snic/Kconfig b/drivers/net/ethernet/3snic/Kconfig new file mode 100644 index 00000000000000..eb71854198d40d --- /dev/null +++ b/drivers/net/ethernet/3snic/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config NET_VENDOR_3SNIC + bool "3SNIC smart NIC devices" + depends on PCI + select NET_DEVLINK + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about 3SNIC cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_3SNIC + +source "drivers/net/ethernet/3snic/sssnic/Kconfig" + +endif # NET_VENDOR_3SNIC diff --git a/drivers/net/ethernet/3snic/Makefile b/drivers/net/ethernet/3snic/Makefile new file mode 100644 index 00000000000000..eb9a8b8cf105c3 --- /dev/null +++ b/drivers/net/ethernet/3snic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. +# + +obj-$(CONFIG_SSSNIC) += sssnic/ diff --git a/drivers/net/ethernet/3snic/sssnic/Kconfig b/drivers/net/ethernet/3snic/sssnic/Kconfig new file mode 100644 index 00000000000000..d515a49cb26855 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config SSSNIC + tristate "3SNIC Ethernet Controller SSSNIC Support" + depends on PCI + depends on ARM64 || X86_64 + default y + help + This driver supports 3SNIC Ethernet Controller SSSNIC device. + For more information about this product, go to the product + description with smart NIC: + + + + To compile this driver as a module, choose M here. The module + will be called sssnic. diff --git a/drivers/net/ethernet/3snic/sssnic/Makefile b/drivers/net/ethernet/3snic/sssnic/Makefile new file mode 100644 index 00000000000000..9aad44b9c46f51 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. 
+# + +obj-$(CONFIG_SSSNIC) += nic/ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h new file mode 100644 index 00000000000000..afc5ff37f4a3cf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_H +#define SSS_ADAPTER_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_sriov_info.h" + +#define SSS_MAX_FUNC 4096 + +struct sss_card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + u8 bus_id; + u8 resvd[7]; + u16 func_num; + atomic_t channel_timeout_cnt; + void *func_handle_array[SSS_MAX_FUNC]; + void *dbgtool_info; +}; + +/* Structure pcidev private */ +struct sss_pci_adapter { + struct pci_dev *pcidev; + void *hwdev; + + struct sss_hal_dev hal_dev; + + /* Record the upper driver object address, + * such as nic_dev and toe_dev, fc_dev + */ + void *uld_dev[SSS_SERVICE_TYPE_MAX]; + + /* Record the upper driver object name */ + char uld_dev_name[SSS_SERVICE_TYPE_MAX][IFNAMSIZ]; + + /* Manage all function device linked by list */ + struct list_head node; + + void __iomem *cfg_reg_bar; + void __iomem *intr_reg_bar; + void __iomem *mgmt_reg_bar; + void __iomem *db_reg_bar; + u64 db_dwqe_len; + u64 db_base_paddr; + + struct sss_card_node *chip_node; + + int init_state; + + struct sss_sriov_info sriov_info; + + atomic_t ref_cnt; + + atomic_t uld_ref_cnt[SSS_SERVICE_TYPE_MAX]; + spinlock_t uld_lock; /* protect uld probe and remove */ + + /* set when uld driver processing event */ + unsigned long uld_run_state; + + unsigned long uld_attach_state; + + /* lock for attach/detach uld */ + struct mutex uld_attach_mutex; + + spinlock_t dettach_uld_lock; /* spin lock for uld_attach_state access */ +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h new file mode 100644 index 00000000000000..fbcf0b007194b0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADM_INFO_H +#define SSS_ADM_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" + +enum sss_adm_msg_type { + /* write to mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_TO_MGMT_MODULE = 2, + + /* multi read command with completion notification */ + SSS_ADM_MSG_MULTI_READ = 3, + + /* write command without completion notification */ + SSS_ADM_MSG_POLL_WRITE = 4, + + /* read command without completion notification */ + SSS_ADM_MSG_POLL_READ = 5, + + /* read from mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE = 6, + + SSS_ADM_MSG_MAX, +}; + +struct sss_adm_msg_state { + u64 head; + u32 desc_buf; + u32 elem_hi; + u32 elem_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct sss_adm_msg_elem { + u64 control; + + u64 next_elem_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_msg_paddr; + } write; + + struct { + u64 hw_wb_reply_paddr; + u64 hw_msg_paddr; + } read; + }; +}; + +struct sss_adm_msg_reply_fmt { + u64 head; + u64 reply; +}; + +struct sss_adm_msg_elem_ctx { + struct sss_adm_msg_elem *elem_vaddr; + + void *adm_msg_vaddr; + + struct 
sss_adm_msg_reply_fmt *reply_fmt; + + struct completion done; + int state; + + u32 store_pi; + void *hwdev; +}; + +struct sss_adm_msg { + void *hwdev; + + enum sss_adm_msg_type msg_type; + + u32 elem_num; + + u16 elem_size; + u16 reply_size; + + u32 pi; + u32 ci; + + struct semaphore sem; + spinlock_t async_lock; /* protect adm msg async and sync */ + dma_addr_t wb_state_paddr; + + dma_addr_t head_elem_paddr; + + struct sss_adm_msg_state *wb_state; + + struct sss_adm_msg_elem *head_node; + + struct sss_adm_msg_elem_ctx *elem_ctx; + struct sss_adm_msg_elem *now_node; + + struct sss_dma_addr_align elem_addr; + + u8 *elem_vaddr_base; + u8 *reply_vaddr_base; + u8 *buf_vaddr_base; + + u64 elem_paddr_base; + u64 reply_paddr_base; + u64 buf_paddr_base; + u64 elem_size_align; + u64 reply_size_align; + u64 buf_size_align; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h new file mode 100644 index 00000000000000..bdcec6ae4ad813 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_AEQ_INFO_H +#define SSS_AEQ_INFO_H + +#include +#include + +#include "sss_eq_info.h" +#include "sss_hw_aeq.h" + +#define SSS_MAX_AEQ 4 + +typedef void (*sss_aeq_hw_event_handler_t)(void *pri_handle, u8 *data, u8 size); +typedef u8 (*sss_aeq_sw_event_handler_t)(void *pri_handle, u8 event, u8 *data); + +struct sss_aeq_info { + void *hwdev; + + sss_aeq_hw_event_handler_t hw_event_handler[SSS_AEQ_EVENT_MAX]; + void *hw_event_data[SSS_AEQ_EVENT_MAX]; + sss_aeq_sw_event_handler_t sw_event_handler[SSS_AEQ_SW_EVENT_MAX]; + void *sw_event_data[SSS_AEQ_SW_EVENT_MAX]; + unsigned long hw_event_handler_state[SSS_AEQ_EVENT_MAX]; + unsigned long sw_event_handler_state[SSS_AEQ_SW_EVENT_MAX]; + + struct sss_eq aeq[SSS_MAX_AEQ]; + u16 num; + u16 rsvd1; + u32 rsvd2; + struct workqueue_struct *workq; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h new file mode 100644 index 00000000000000..749268d67a6bfa --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_BOARD_INFO_H +#define SSS_BOARD_INFO_H + +enum sss_board_type_define { + SSS_BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ + SSS_BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ + SSS_BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */ + SSS_BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS 
*/ + SSS_BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ + SSS_BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */ + SSS_BOARD_MAX_TYPE = 0xFF +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h new file mode 100644 index 00000000000000..e6806f64cadad8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CEQ_INFO_H +#define SSS_CEQ_INFO_H + +#include + +#include "sss_hw_ceq.h" +#include "sss_eq_info.h" + +#define SSS_MAX_CEQ 32 + +typedef void (*sss_ceq_event_handler_t)(void *dev, u32 data); + +struct sss_ceq_info { + void *hwdev; + + sss_ceq_event_handler_t event_handler[SSS_CEQ_EVENT_MAX]; + void *event_handler_data[SSS_CEQ_EVENT_MAX]; + void *ceq_data[SSS_CEQ_EVENT_MAX]; + unsigned long event_handler_state[SSS_CEQ_EVENT_MAX]; + + struct sss_eq ceq[SSS_MAX_CEQ]; + u16 num; + u16 rsvd1; + u32 rsvd2; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h new file mode 100644 index 00000000000000..08e4389957964d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CSR_H +#define SSS_CSR_H + +#define SSS_CSR_CFG_FLAG 0x40000000 + +#define SSS_MGMT_FLAG 0xC0000000 + +#define SSS_CSR_FLAG_MASK 0x3FFFFFFF + +#define SSS_VF_CFG_REG_OFFSET 0x2000 + +#define SSS_HOST_CSR_BASE_ADDR (SSS_MGMT_FLAG + 0x6000) +#define SSS_CSR_GLOBAL_BASE_ADDR (SSS_MGMT_FLAG + 0x6400) + +/* HW interface registers */ +#define SSS_CSR_HW_ATTR0_ADDR (SSS_CSR_CFG_FLAG + 0x0) +#define SSS_CSR_HW_ATTR1_ADDR (SSS_CSR_CFG_FLAG + 0x4) +#define SSS_CSR_HW_ATTR2_ADDR (SSS_CSR_CFG_FLAG + 0x8) +#define SSS_CSR_HW_ATTR3_ADDR (SSS_CSR_CFG_FLAG + 0xC) +#define SSS_CSR_HW_ATTR4_ADDR (SSS_CSR_CFG_FLAG + 0x10) +#define SSS_CSR_HW_ATTR5_ADDR (SSS_CSR_CFG_FLAG + 0x14) +#define SSS_CSR_HW_ATTR6_ADDR (SSS_CSR_CFG_FLAG + 0x18) + +#define SSS_HW_CSR_MBX_DATA_OFF 0x80 +#define SSS_HW_CSR_MBX_CTRL_OFF (SSS_CSR_CFG_FLAG + 0x0100) +#define SSS_HW_CSR_MBX_INT_OFFSET_OFF (SSS_CSR_CFG_FLAG + 0x0104) +#define SSS_HW_CSR_MBX_RES_H_OFF (SSS_CSR_CFG_FLAG + 0x0108) +#define SSS_HW_CSR_MBX_RES_L_OFF (SSS_CSR_CFG_FLAG + 0x010C) + +#define SSS_PPF_ELECT_OFF 0x0 +#define SSS_MPF_ELECT_OFF 0x20 + +#define SSS_CSR_PPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_PPF_ELECT_OFF) + +#define SSS_CSR_GLOBAL_MPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_MPF_ELECT_OFF) + +#define SSS_CSR_HW_PPF_ELECT_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x60) +#define SSS_CSR_HW_PPF_ELECT_PORT_STRIDE 0x4 + +#define SSS_CSR_FUNC_PPF_ELECT(host_id) \ + (SSS_CSR_HW_PPF_ELECT_BASE_ADDR + \ + (host_id) * SSS_CSR_HW_PPF_ELECT_PORT_STRIDE) + +#define SSS_CSR_DMA_ATTR_TBL_ADDR (SSS_CSR_CFG_FLAG + 0x380) +#define SSS_CSR_DMA_ATTR_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x390) + +/* CLP registers */ +#define SSS_BAR3_CLP_BASE_ADDR 
(SSS_MGMT_FLAG + 0x0000) + +#define SSS_UCPU_CLP_SIZE_REG (SSS_HOST_CSR_BASE_ADDR + 0x40) +#define SSS_UCPU_CLP_REQBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x44) +#define SSS_UCPU_CLP_RSPBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x48) +#define SSS_UCPU_CLP_REQ_REG (SSS_HOST_CSR_BASE_ADDR + 0x4c) +#define SSS_UCPU_CLP_RSP_REG (SSS_HOST_CSR_BASE_ADDR + 0x50) +#define SSS_CLP_REG(member) (SSS_UCPU_CLP_##member##_REG) + +#define SSS_CLP_REQ_DATA SSS_BAR3_CLP_BASE_ADDR +#define SSS_CLP_RSP_DATA (SSS_BAR3_CLP_BASE_ADDR + 0x1000) +#define SSS_CLP_DATA(member) (SSS_CLP_##member##_DATA) + +/* MSI-X registers */ +#define SSS_CSR_MSIX_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x310) +#define SSS_CSR_MSIX_CTRL_ADDR (SSS_CSR_CFG_FLAG + 0x300) +#define SSS_CSR_MSIX_CNT_ADDR (SSS_CSR_CFG_FLAG + 0x304) +#define SSS_CSR_FUNC_MSI_CLR_WR_ADDR (SSS_CSR_CFG_FLAG + 0x58) + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_SHIFT 22 + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U +#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_MASK 0x3FFU + +#define SSS_SET_MSI_CLR_INDIR(val, member) \ + (((val) & SSS_MSI_CLR_INDIR_##member##_MASK) << \ + SSS_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define SSS_AEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x210) +#define SSS_CEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x290) + +#define SSS_EQ_INDIR_ID_ADDR(type) \ + ((type == SSS_AEQ) ? 
SSS_AEQ_INDIR_ID_ADDR : SSS_CEQ_INDIR_ID_ADDR) + +#define SSS_AEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x240) +#define SSS_CEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x2C0) + +#define SSS_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define SSS_AEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_AEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_CEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CSR_AEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x200) +#define SSS_CSR_AEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x204) +#define SSS_CSR_AEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x208) +#define SSS_CSR_AEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x20C) +#define SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x50) + +#define SSS_CSR_CEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x280) +#define SSS_CSR_CEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x284) +#define SSS_CSR_CEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x288) +#define SSS_CSR_CEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x28c) +#define SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x54) + +/* ADM MSG registers */ +#define SSS_CSR_ADM_MSG_BASE (SSS_MGMT_FLAG + 0x2000) + +#define SSS_CSR_ADM_MSG_STRIDE 0x80 + +#define SSS_CSR_ADM_MSG_HEAD_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x0 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_HEAD_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x4 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x8 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0xC + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x10 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_CTRL_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x14 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_PI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x1C + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_REQ_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x20 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_0_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x30 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +/* self test register */ +#define SSS_MGMT_HEALTH_STATUS_ADDR (SSS_MGMT_FLAG + 0x983c) + +#define SSS_CHIP_BASE_INFO_ADDR (SSS_MGMT_FLAG + 0xB02C) + +#define SSS_CHIP_ERR_STATUS0_ADDR (SSS_MGMT_FLAG + 0xC0EC) +#define SSS_CHIP_ERR_STATUS1_ADDR (SSS_MGMT_FLAG + 0xC0F0) + +#define SSS_ERR_INFO0_ADDR (SSS_MGMT_FLAG + 0xC0F4) +#define SSS_ERR_INFO1_ADDR (SSS_MGMT_FLAG + 0xC0F8) +#define SSS_ERR_INFO2_ADDR (SSS_MGMT_FLAG + 0xC0FC) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h new file mode 100644 index 00000000000000..02727d453fed46 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CTRLQ_INFO_H +#define SSS_CTRLQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hw_wq.h" +#include "sss_hw_ctrlq.h" + +#define SSS_DEFAULT_WQ_PAGE_SIZE 0x100000 +#define SSS_HW_WQ_PAGE_SIZE 0x1000 +#define SSS_MAX_WQ_PAGE_NUM 8 + +/* ctrlq ack type */ +enum sss_ack_type { 
+ SSS_ACK_TYPE_CTRLQ, + SSS_ACK_TYPE_SHARE_CQN, + SSS_ACK_TYPE_APP_CQN, + + SSS_MOD_ACK_MAX = 15, +}; + +enum sss_ctrlq_type { + SSS_CTRLQ_SYNC, + SSS_CTRLQ_ASYNC, + SSS_MAX_CTRLQ_TYPE = 4 +}; + +enum sss_ctrlq_msg_type { + SSS_MSG_TYPE_NONE, + SSS_MSG_TYPE_SET_ARM, + SSS_MSG_TYPE_DIRECT_RESP, + SSS_MSG_TYPE_SGE_RESP, + SSS_MSG_TYPE_ASYNC, + SSS_MSG_TYPE_PSEUDO_TIMEOUT, + SSS_MSG_TYPE_TIMEOUT, + SSS_MSG_TYPE_FORCE_STOP, + SSS_MSG_TYPE_MAX +}; + +struct sss_ctrlq_cmd_info { + enum sss_ctrlq_msg_type msg_type; + u16 channel; + + struct completion *done; + int *err_code; + int *cmpt_code; + u64 *direct_resp; + u64 msg_id; + + struct sss_ctrl_msg_buf *in_buf; + struct sss_ctrl_msg_buf *out_buf; +}; + +struct sss_ctrlq { + struct sss_wq wq; + + enum sss_ctrlq_type ctrlq_type; + int wrapped; + + /* spinlock for send ctrlq commands */ + spinlock_t ctrlq_lock; + + struct sss_ctrlq_ctxt_info ctrlq_ctxt; + + struct sss_ctrlq_cmd_info *cmd_info; + + void *hwdev; +}; + +struct sss_ctrlq_info { + void *hwdev; + + struct pci_pool *msg_buf_pool; + + /* doorbell area */ + u8 __iomem *db_base; + + /* All ctrlq's CLA of a VF occupy a PAGE when ctrlq wq is 1-level CLA */ + void *wq_block_vaddr; + dma_addr_t wq_block_paddr; + struct sss_ctrlq ctrlq[SSS_MAX_CTRLQ_TYPE]; + + u32 state; + u32 disable_flag; + + u8 lock_channel_en; + u8 num; + u8 rsvd[6]; + unsigned long channel_stop; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h new file mode 100644 index 00000000000000..c8a16dabeacc19 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_EQ_INFO_H +#define SSS_EQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" + +#define SSS_EQ_IRQ_NAME_LEN 64 + +enum sss_eq_type { + SSS_AEQ, + SSS_CEQ +}; + +typedef void (*sss_init_desc_handler_t)(void *eq); +typedef u32 (*sss_chip_init_attr_handler_t)(void *eq); + +struct sss_eq { + char *name; + void *hwdev; + enum sss_eq_type type; + u32 page_size; + u32 old_page_size; + u32 len; + + u32 ci; + + u16 wrap; + u16 qid; + + u16 entry_size; + u16 page_num; + + u32 num_entry_per_pg; + + struct sss_irq_desc irq_desc; + char irq_name[SSS_EQ_IRQ_NAME_LEN]; + + struct sss_dma_addr_align *page_array; + + struct work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hw_intr_jiffies; + u64 sw_intr_jiffies; + + sss_init_desc_handler_t init_desc_handler; + sss_chip_init_attr_handler_t init_attr_handler; + irq_handler_t irq_handler; +}; + +struct sss_eq_cfg { + enum sss_service_type type; + int id; + int free; /* 1 - alocated, 0- freed */ +}; + +struct sss_eq_info { + struct sss_eq_cfg *eq; + + u8 ceq_num; + + u8 remain_ceq_num; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h new file mode 100644 index 00000000000000..2970438cb3ac56 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_H +#define SSS_HWDEV_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_svc_cap.h" +#include 
"sss_hw_mbx_msg.h" +#include "sss_hw_statistics.h" +#include "sss_hw_event.h" + +#include "sss_hwif.h" +#include "sss_mgmt_info.h" +#include "sss_ctrlq_info.h" +#include "sss_aeq_info.h" +#include "sss_ceq_info.h" +#include "sss_mbx_info.h" +#include "sss_mgmt_channel.h" + +#define SSSNIC_CHANNEL_DETECT_PERIOD (5 * 1000) + +enum sss_func_mode { + SSS_FUNC_MOD_MIN, + + /* single host */ + SSS_FUNC_MOD_NORMAL_HOST = SSS_FUNC_MOD_MIN, + + /* multi host, bare-metal, sdi side */ + SSS_FUNC_MOD_MULTI_BM_MASTER, + + /* multi host, bare-metal, host side */ + SSS_FUNC_MOD_MULTI_BM_SLAVE, + + /* multi host, vm mode, sdi side */ + SSS_FUNC_MOD_MULTI_VM_MASTER, + + /* multi host, vm mode, host side */ + SSS_FUNC_MOD_MULTI_VM_SLAVE, + + SSS_FUNC_MOD_MAX = SSS_FUNC_MOD_MULTI_VM_SLAVE, +}; + +struct sss_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct sss_mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + + struct sss_page_addr *brm_srch_page_addr; +}; + +struct sss_devlink { + void *hwdev; + u8 active_cfg_id; /* 1 ~ 8 */ + u8 switch_cfg_id; /* 1 ~ 8 */ +}; + +struct sss_heartbeat { + u8 pcie_link_down; + u8 heartbeat_lost; + u16 rsvd; + u32 pcie_link_down_cnt; + struct timer_list heartbeat_timer; + struct work_struct lost_work; +}; + +struct sss_aeq_stat { + u16 busy_cnt; + u16 rsvd; + u64 cur_recv_cnt; + u64 last_recv_cnt; +}; + +struct sss_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct sss_hwdev { + void *adapter_hdl; /* pointer to sss_pci_adapter or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + + /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + void *dev_hdl; + void *chip_node; + + void *service_adapter[SSS_SERVICE_TYPE_MAX]; + + u32 wq_page_size; + int chip_present_flag; + u8 poll; /* use polling mode or int mode */ + u8 rsvd[3]; + struct sss_hwif *hwif; /* include void __iomem *bar */ + struct sss_comm_global_attr glb_attr; + u64 features[SSS_MAX_FEATURE_QWORD]; + + struct sss_mgmt_info *mgmt_info; + + struct sss_ctrlq_info *ctrlq_info; + struct sss_aeq_info *aeq_info; + struct sss_ceq_info *ceq_info; + struct sss_mbx *mbx; // mbx + struct sss_msg_pf_to_mgmt *pf_to_mgmt; // adm + struct sss_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct sss_hw_stats hw_stats; + u8 *chip_fault_stats; + + sss_event_handler_t event_handler; + void *event_handler_data; + + struct sss_board_info board_info; + + struct delayed_work sync_time_task; + struct delayed_work channel_detect_task; + + struct workqueue_struct *workq; + + struct sss_heartbeat heartbeat; + + ulong func_state; + spinlock_t channel_lock; /* protect channel init and deinit */ + + struct sss_devlink *devlink_dev; + + enum sss_func_mode func_mode; + + struct sss_aeq_stat aeq_stat; + + u16 aeq_busy_cnt; +}; + +#define SSS_TO_HWDEV(ptr) ((struct sss_hwdev *)(ptr)->hwdev) +#define SSS_TO_DEV(hwdev) (((struct sss_hwdev *)hwdev)->dev_hdl) +#define SSS_TO_HWIF(hwdev) (((struct sss_hwdev *)hwdev)->hwif) +#define SSS_TO_MGMT_INFO(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info) +#define SSS_TO_AEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->aeq_info) +#define SSS_TO_CEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ceq_info) +#define SSS_TO_CTRLQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ctrlq_info) +#define SSS_TO_IRQ_INFO(hwdev) (&((struct sss_hwdev *)hwdev)->mgmt_info->irq_info) +#define SSS_TO_SVC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap)) +#define SSS_TO_NIC_CAP(hwdev) (&(((struct sss_hwdev 
*)hwdev)->mgmt_info->svc_cap.nic_cap)) +#define SSS_TO_MAX_SQ_NUM(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap.max_sq) +#define SSS_TO_PHY_PORT_ID(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_id) +#define SSS_TO_MAX_VF_NUM(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.max_vf) +#define SSS_TO_FUNC_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.cos_valid_bitmap) +#define SSS_TO_PORT_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_cos_valid_bitmap) + +enum sss_servic_bit_define { + SSS_SERVICE_BIT_NIC = 0, + SSS_SERVICE_BIT_ROCE = 1, + SSS_SERVICE_BIT_VBS = 2, + SSS_SERVICE_BIT_TOE = 3, + SSS_SERVICE_BIT_IPSEC = 4, + SSS_SERVICE_BIT_FC = 5, + SSS_SERVICE_BIT_VIRTIO = 6, + SSS_SERVICE_BIT_OVS = 7, + SSS_SERVICE_BIT_NVME = 8, + SSS_SERVICE_BIT_ROCEAA = 9, + SSS_SERVICE_BIT_CURRENET = 10, + SSS_SERVICE_BIT_PPA = 11, + SSS_SERVICE_BIT_MIGRATE = 12, + SSS_MAX_SERVICE_BIT +}; + +#define SSS_CFG_SERVICE_MASK_NIC (0x1 << SSS_SERVICE_BIT_NIC) +#define SSS_CFG_SERVICE_MASK_ROCE (0x1 << SSS_SERVICE_BIT_ROCE) +#define SSS_CFG_SERVICE_MASK_VBS (0x1 << SSS_SERVICE_BIT_VBS) +#define SSS_CFG_SERVICE_MASK_TOE (0x1 << SSS_SERVICE_BIT_TOE) +#define SSS_CFG_SERVICE_MASK_IPSEC (0x1 << SSS_SERVICE_BIT_IPSEC) +#define SSS_CFG_SERVICE_MASK_FC (0x1 << SSS_SERVICE_BIT_FC) +#define SSS_CFG_SERVICE_MASK_VIRTIO (0x1 << SSS_SERVICE_BIT_VIRTIO) +#define SSS_CFG_SERVICE_MASK_OVS (0x1 << SSS_SERVICE_BIT_OVS) +#define SSS_CFG_SERVICE_MASK_NVME (0x1 << SSS_SERVICE_BIT_NVME) +#define SSS_CFG_SERVICE_MASK_ROCEAA (0x1 << SSS_SERVICE_BIT_ROCEAA) +#define SSS_CFG_SERVICE_MASK_CURRENET (0x1 << SSS_SERVICE_BIT_CURRENET) +#define SSS_CFG_SERVICE_MASK_PPA (0x1 << SSS_SERVICE_BIT_PPA) +#define SSS_CFG_SERVICE_MASK_MIGRATE (0x1 << SSS_SERVICE_BIT_MIGRATE) + +#define SSS_CFG_SERVICE_RDMA_EN SSS_CFG_SERVICE_MASK_ROCE + +#define SSS_IS_NIC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_NIC) +#define SSS_IS_ROCE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_ROCE) +#define SSS_IS_VBS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_VBS) +#define SSS_IS_TOE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_TOE) +#define SSS_IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_IPSEC) +#define SSS_IS_FC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_FC) +#define SSS_IS_OVS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_OVS) +#define SSS_IS_RDMA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_RDMA_EN) +#define SSS_IS_RDMA_ENABLE(dev) \ + ((dev)->mgmt_info->svc_cap.sf_svc_attr.rdma_en) +#define SSS_IS_PPA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_PPA) +#define SSS_IS_MIGR_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_MIGRATE) + +#define SSS_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define SSS_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define SSS_MGMT_CPU_NODE_ID(hwdev) \ + ((hwdev)->glb_attr.mgmt_host_node_id) + +#define SSS_GET_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type) +#define SSS_IS_PF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PF) +#define SSS_IS_VF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_VF) +#define SSS_IS_PPF(dev) \ + 
(SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PPF) + +#define SSS_GET_FUNC_ID(hwdev) ((hwdev)->hwif->attr.func_id) + +#define SSS_IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_MASTER) +#define SSS_IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_SLAVE) +#define SSS_IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_MASTER) +#define SSS_IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_SLAVE) + +#define SSS_IS_MASTER_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_VM_MASTER_HOST(hwdev)) + +#define SSS_IS_SLAVE_HOST(hwdev) \ + (SSS_IS_BMGW_SLAVE_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_IS_MULTI_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_BMGW_SLAVE_HOST(hwdev) || \ + SSS_IS_VM_MASTER_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_SPU_HOST_ID 4 + +#define SSS_SUPPORT_ADM_MSG(hwdev) ((hwdev)->features[0] & SSS_COMM_F_ADM) +#define SSS_SUPPORT_MBX_SEGMENT(hwdev) \ + (SSS_GET_HWIF_PCI_INTF_ID((hwdev)->hwif) == SSS_SPU_HOST_ID) +#define SSS_SUPPORT_CTRLQ_NUM(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CTRLQ_NUM) +#define SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_VIRTIO_VQ_SIZE) +#define SSS_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) +#define SSS_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +enum { + SSS_CFG_FREE = 0, + SSS_CFG_BUSY = 1 +}; + +int sss_init_pci(void); +void sss_exit_pci(void); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h new file mode 100644 index 00000000000000..d7e18653e79438 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_H +#define SSS_HWIF_H + +#include +#include + +struct sss_db_pool { + unsigned long *bitmap; + u32 bit_size; + + /* spinlock for allocating doorbell area */ + spinlock_t id_lock; +}; + +struct sss_func_attr { + enum sss_func_type func_type; + + u16 func_id; + u8 pf_id; + u8 pci_intf_id; + + u16 global_vf_off; + u8 mpf_id; + u8 ppf_id; + + u16 irq_num; /* max: 2 ^ 15 */ + u8 aeq_num; /* max: 2 ^ 3 */ + u8 ceq_num; /* max: 2 ^ 7 */ + + u16 sq_num; /* max: 2 ^ 8 */ + u8 dma_attr_num; /* max: 2 ^ 6 */ + u8 msix_flex_en; +}; + +struct sss_hwif { + u8 __iomem *cfg_reg_base; + u8 __iomem *mgmt_reg_base; + u64 db_base_paddr; + u64 db_dwqe_len; + u8 __iomem *db_base_vaddr; + + void *pdev; + + struct sss_db_pool db_pool; + + struct sss_func_attr attr; +}; + +#define SSS_GET_HWIF_AEQ_NUM(hwif) ((hwif)->attr.aeq_num) +#define SSS_GET_HWIF_CEQ_NUM(hwif) ((hwif)->attr.ceq_num) +#define SSS_GET_HWIF_IRQ_NUM(hwif) ((hwif)->attr.irq_num) +#define SSS_GET_HWIF_GLOBAL_ID(hwif) ((hwif)->attr.func_id) +#define SSS_GET_HWIF_PF_ID(hwif) ((hwif)->attr.pf_id) +#define SSS_GET_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_off) +#define SSS_GET_HWIF_PPF_ID(hwif) ((hwif)->attr.ppf_id) +#define SSS_GET_HWIF_MPF_ID(hwif) ((hwif)->attr.mpf_id) +#define SSS_GET_HWIF_PCI_INTF_ID(hwif) ((hwif)->attr.pci_intf_id) +#define SSS_GET_HWIF_FUNC_TYPE(hwif) ((hwif)->attr.func_type) +#define SSS_GET_HWIF_MSIX_EN(hwif) ((hwif)->attr.msix_flex_en) + +#define SSS_SET_HWIF_AEQ_NUM(hwif, val) \ + ((hwif)->attr.aeq_num = (val)) + +#define SSS_SET_HWIF_CEQ_NUM(hwif, val) \ + ((hwif)->attr.ceq_num = (val)) + +#define 
SSS_SET_HWIF_IRQ_NUM(hwif, val) \ + ((hwif)->attr.irq_num = (val)) + +#define SSS_SET_HWIF_GLOBAL_ID(hwif, val) \ + ((hwif)->attr.func_id = (val)) + +#define SSS_SET_HWIF_PF_ID(hwif, val) \ + ((hwif)->attr.pf_id = (val)) + +#define SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, val) \ + ((hwif)->attr.global_vf_off = (val)) + +#define SSS_SET_HWIF_PPF_ID(hwif, val) \ + ((hwif)->attr.ppf_id = (val)) + +#define SSS_SET_HWIF_MPF_ID(hwif, val) \ + ((hwif)->attr.mpf_id = (val)) + +#define SSS_SET_HWIF_PCI_INTF_ID(hwif, val) \ + ((hwif)->attr.pci_intf_id = (val)) + +#define SSS_SET_HWIF_FUNC_TYPE(hwif, val) \ + ((hwif)->attr.func_type = (val)) + +#define SSS_SET_HWIF_DMA_ATTR_NUM(hwif, val) \ + ((hwif)->attr.dma_attr_num = (val)) + +#define SSS_SET_HWIF_MSIX_EN(hwif, val) \ + ((hwif)->attr.msix_flex_en = (val)) + +#define SSS_SET_HWIF_SQ_NUM(hwif, val) \ + ((hwif)->attr.sq_num = (val)) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h new file mode 100644 index 00000000000000..dfc2a68680430c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_IRQ_INFO_H +#define SSS_IRQ_INFO_H + +#include +#include + +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" + +struct sss_irq { + enum sss_service_type type; + int busy; /* 1 - allocated, 0 - freed */ + struct sss_irq_desc desc; +}; + +struct sss_irq_info { + struct sss_irq *irq; + u16 total_num; + u16 free_num; + u16 max_num; /* device max irq number */ + + struct mutex irq_mutex; /* mutex is used to allocate eq */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h new file mode 100644 index 00000000000000..542fcb20442a39 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MBX_INFO_H +#define SSS_MBX_INFO_H +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" + +enum sss_mbx_event_state { + SSS_EVENT_START = 0, + SSS_EVENT_FAIL, + SSS_EVENT_SUCCESS, + SSS_EVENT_TIMEOUT, + SSS_EVENT_END, +}; + +struct sss_mbx_send { + u8 *data; + + u64 *wb_state; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +struct sss_mbx_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 pi; + u16 ci; +}; + +struct sss_mbx_msg_info { + u8 msg_id; + u8 state; /* can only use 1 bit */ +}; + +struct sss_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct sss_mbx_msg_info msg_info; +}; + +struct sss_msg_buffer { + struct sss_msg_desc resp_msg; + struct sss_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +struct sss_mbx { + void *hwdev; + + u8 lock_channel_en; + u8 rsvd0[3]; + unsigned long channel_stop; + + /* lock for send mbx message and ack message */ + struct mutex mbx_send_lock; + /* lock for send mbx message */ + struct mutex msg_send_lock; + struct sss_mbx_send mbx_send; + + struct sss_mbx_dma_queue sync_msg_queue; + struct sss_mbx_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct sss_msg_buffer mgmt_msg; /* driver and MGMT CPU */ + struct sss_msg_buffer *host_msg; /* PPF message between hosts */ + struct sss_msg_buffer *func_msg; /* PF to VF or VF to PF */ + 
u16 num_func_msg; + u16 cur_msg_channel; + u8 support_h2h_msg; /* host to host */ + u8 rsvd1[3]; + /* vf receive pf/ppf callback */ + sss_vf_mbx_handler_t vf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *vf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf/ppf receive vf callback */ + sss_pf_mbx_handler_t pf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_mbx_data[SSS_MOD_TYPE_MAX]; + /* ppf receive pf/ppf callback */ + sss_ppf_mbx_handler_t ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *ppf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf receive ppf callback */ + sss_pf_from_ppf_mbx_handler_t pf_recv_ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_recv_ppf_mbx_data[SSS_MOD_TYPE_MAX]; + unsigned long ppf_to_pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long ppf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long vf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + + enum sss_mbx_event_state event_flag; + /* lock for mbx event flag */ + spinlock_t mbx_lock; + + u8 send_msg_id; + u8 rsvd2[3]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h new file mode 100644 index 00000000000000..4c0c3c482dde48 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_CHANNEL_H +#define SSS_MGMT_CHANNEL_H + +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" +#include "sss_hw_mgmt.h" +#include "sss_adm_info.h" + +/* message header define */ +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_SHIFT 0 +#define SSS_MSG_HEADER_STATUS_SHIFT 13 +#define SSS_MSG_HEADER_SOURCE_SHIFT 15 +#define SSS_MSG_HEADER_AEQ_ID_SHIFT 16 +#define SSS_MSG_HEADER_MSG_ID_SHIFT 18 +#define SSS_MSG_HEADER_CMD_SHIFT 22 + +#define SSS_MSG_HEADER_MSG_LEN_SHIFT 32 +#define SSS_MSG_HEADER_MODULE_SHIFT 43 +#define SSS_MSG_HEADER_SEG_LEN_SHIFT 48 +#define SSS_MSG_HEADER_NO_ACK_SHIFT 54 +#define SSS_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define SSS_MSG_HEADER_SEQID_SHIFT 56 +#define SSS_MSG_HEADER_LAST_SHIFT 62 +#define SSS_MSG_HEADER_DIRECTION_SHIFT 63 + +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_MASK 0x1FFF +#define SSS_MSG_HEADER_STATUS_MASK 0x1 +#define SSS_MSG_HEADER_SOURCE_MASK 0x1 +#define SSS_MSG_HEADER_AEQ_ID_MASK 0x3 +#define SSS_MSG_HEADER_MSG_ID_MASK 0xF +#define SSS_MSG_HEADER_CMD_MASK 0x3FF + +#define SSS_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define SSS_MSG_HEADER_MODULE_MASK 0x1F +#define SSS_MSG_HEADER_SEG_LEN_MASK 0x3F +#define SSS_MSG_HEADER_NO_ACK_MASK 0x1 +#define SSS_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define SSS_MSG_HEADER_SEQID_MASK 0x3F +#define SSS_MSG_HEADER_LAST_MASK 0x1 +#define SSS_MSG_HEADER_DIRECTION_MASK 0x1 + +#define SSS_GET_MSG_HEADER(val, field) \ + (((val) >> SSS_MSG_HEADER_##field##_SHIFT) & \ + SSS_MSG_HEADER_##field##_MASK) +#define SSS_SET_MSG_HEADER(val, field) \ + ((u64)(((u64)(val)) & SSS_MSG_HEADER_##field##_MASK) << \ + SSS_MSG_HEADER_##field##_SHIFT) + +enum sss_msg_ack_type { + SSS_MSG_ACK, + SSS_MSG_NO_ACK, +}; + +enum sss_data_type { + SSS_INLINE_DATA = 0, + SSS_DMA_DATA = 1, +}; + +enum sss_msg_seg_type { + SSS_NOT_LAST_SEG = 0, + SSS_LAST_SEG = 1, +}; + +enum sss_msg_direction_type { + SSS_DIRECT_SEND_MSG = 0, + SSS_RESP_MSG = 1, +}; + +enum sss_msg_src_type { + SSS_MSG_SRC_MGMT = 0, + SSS_MSG_SRC_MBX = 1, +}; + +enum sss_mgmt_msg_cb_t_state { + SSS_CALLBACK_REG = 0, + SSS_CALLBACK_RUNNING, +}; + +enum sss_pf_to_mgmt_event_state { + SSS_ADM_EVENT_UNINIT = 0, + 
SSS_ADM_EVENT_START, + SSS_ADM_EVENT_SUCCESS, + SSS_ADM_EVENT_FAIL, + SSS_ADM_EVENT_TIMEOUT, + SSS_ADM_EVENT_END, +}; + +struct sss_recv_msg { + void *buf; + + u16 buf_len; + u16 cmd; + + u16 msg_id; + u8 seq_id; + u8 no_ack; + + enum sss_mod_type mod; + + struct completion done; +}; + +struct sss_msg_pf_to_mgmt { + void *hwdev; + spinlock_t async_msg_lock; /* protect msg async and sync */ + + struct semaphore sync_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_buf; + void *ack_buf; + + struct sss_recv_msg recv_msg; + struct sss_recv_msg recv_resp_msg; + + u16 rsvd; + u16 async_msg_id; + u16 sync_msg_id; + struct sss_adm_msg *adm_msg[SSS_ADM_MSG_MAX]; + + sss_mgmt_msg_handler_t recv_handler[SSS_MOD_TYPE_HW_MAX]; + void *recv_data[SSS_MOD_TYPE_HW_MAX]; + unsigned long recv_handler_state[SSS_MOD_TYPE_HW_MAX]; + void *async_msg_cb_data[SSS_MOD_TYPE_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; /* protect event async and sync */ + enum sss_pf_to_mgmt_event_state event_state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h new file mode 100644 index 00000000000000..f3b50b0d4f1dfe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_INFO_H +#define SSS_MGMT_INFO_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_eq_info.h" +#include "sss_irq_info.h" + +struct sss_dev_sf_svc_attr { + u8 rdma_en; + u8 rsvd[3]; +}; + +enum sss_intr_type { + SSS_INTR_TYPE_MSIX, + SSS_INTR_TYPE_MSI, + SSS_INTR_TYPE_INT, + SSS_INTR_TYPE_NONE, + + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* device service capability */ +struct sss_service_cap { + struct sss_dev_sf_svc_attr sf_svc_attr; + u16 svc_type; /* user input service type */ + u16 chip_svc_type; /* HW supported service type, reference to sss_servic_bit_define */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 host_valid_bitmap; + u8 master_host_id; + u8 srv_multi_host_mode; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + u8 flexq_en; + u8 resvd; + + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u16 max_vf; /* max VF number that PF supported */ + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + + u16 pseudo_vf_cfg_num; + u16 virtio_vq_size; + + /* DO NOT get interrupt_type from firmware */ + enum sss_intr_type intr_type; + + u8 sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + u8 lb_mode; + u8 smf_pg; + u8 rsvd[3]; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. 
+ * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. + * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + struct sss_nic_service_cap nic_cap; /* NIC capability */ + struct sss_rdma_service_cap rdma_cap; /* RDMA capability */ + struct sss_fc_service_cap fc_cap; /* FC capability */ + struct sss_toe_service_cap toe_cap; /* ToE capability */ + struct sss_ovs_service_cap ovs_cap; /* OVS capability */ + struct sss_ipsec_service_cap ipsec_cap; /* IPsec capability */ + struct sss_ppa_service_cap ppa_cap; /* PPA capability */ + struct sss_vbs_service_cap vbs_cap; /* VBS capability */ +}; + +struct sss_svc_cap_info { + u32 func_id; + struct sss_service_cap cap; +}; + +struct sss_mgmt_info { + void *hwdev; + struct sss_service_cap svc_cap; + struct sss_eq_info eq_info; /* CEQ */ + struct sss_irq_info irq_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h new file mode 100644 index 00000000000000..bfb29200db9f50 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_SRIOV_INFO_H +#define SSS_SRIOV_INFO_H + +#include + +enum sss_sriov_state { + SSS_SRIOV_DISABLE, + SSS_SRIOV_ENABLE, + SSS_SRIOV_PRESENT, +}; + +struct sss_sriov_info { + u8 enabled; + u8 rsvd[3]; + unsigned int vf_num; + unsigned long state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c new file mode 100644 index 00000000000000..e71c40f7bb8c73 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_pci_sriov.h" +#include "sss_pci_id_tbl.h" +#include "sss_adapter.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_hw_export.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#ifndef SSS_PF_NUM_MAX +#define SSS_PF_NUM_MAX (16) +#endif + +#define SSS_ADAPTER_CNT_TIMEOUT 10000 +#define SSS_WAIT_ADAPTER_USLEEP_MIN 9900 +#define SSS_WAIT_ADAPTER_USLEEP_MAX 10000 + +#define SSS_CHIP_NODE_HOLD_TIMEOUT (10 * 60 * 1000) +#define SSS_WAIT_CHIP_NODE_CHANGED (10 * 60 * 1000) +#define SSS_PRINT_TIMEOUT_INTERVAL 10000 +#define SSS_MICRO_SECOND 1000 +#define SSS_CHIP_NODE_USLEEP_MIN 900 +#define SSS_CHIP_NODE_USLEEP_MAX 1000 + +#define SSS_CARD_CNT_MAX 64 + +#define SSS_IS_SPU_DEV(pdev) ((pdev)->device == SSS_DEV_ID_SPU) + +enum sss_node_state { + SSS_NODE_CHANGE = BIT(0), +}; + +struct sss_chip_node_lock { + struct mutex chip_mutex; /* lock for chip list */ + unsigned long state; + atomic_t ref_cnt; +}; + +static struct sss_chip_node_lock g_chip_node_lock; + +static unsigned long g_index_bit_map; + +LIST_HEAD(g_chip_list); + +struct list_head *sss_get_chip_list(void) +{ + return &g_chip_list; +} + +void lld_dev_hold(struct sss_hal_dev *dev) +{ + struct 
sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_inc(&pci_adapter->ref_cnt); +} + +void lld_dev_put(struct sss_hal_dev *dev) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_dec(&pci_adapter->ref_cnt); +} + +void sss_chip_node_lock(void) +{ + unsigned long end; + bool timeout = true; + u32 loop_cnt; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + loop_cnt = 0; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait for adapter change complete timeout when trying to get adapter lock\n"); + + loop_cnt = 0; + timeout = true; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!atomic_read(&g_chip_node_lock.ref_cnt)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter unused for %us, reference count: %d\n", + loop_cnt / SSS_MICRO_SECOND, + atomic_read(&g_chip_node_lock.ref_cnt)); + + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, + SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && atomic_read(&g_chip_node_lock.ref_cnt)) + pr_warn("Wait for adapter unused timeout\n"); + + mutex_unlock(&g_chip_node_lock.chip_mutex); +} + +void sss_chip_node_unlock(void) +{ + clear_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state); +} + +void sss_hold_chip_node(void) +{ + unsigned long end; + u32 loop_cnt = 0; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + end = jiffies + msecs_to_jiffies(SSS_CHIP_NODE_HOLD_TIMEOUT); + do { + if (!test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + break; + + loop_cnt++; + + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait for adapter change complete timeout when trying to hold adapter dev\n"); + + atomic_inc(&g_chip_node_lock.ref_cnt); + mutex_unlock(&g_chip_node_lock.chip_mutex); +} + +void sss_put_chip_node(void) +{ + atomic_dec(&g_chip_node_lock.ref_cnt); +} + +void sss_pre_init(void) +{ + mutex_init(&g_chip_node_lock.chip_mutex); + atomic_set(&g_chip_node_lock.ref_cnt, 0); + sss_init_uld_lock(); +} + +struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev) +{ + if (!pdev) + return NULL; + + return pci_get_drvdata(pdev); +} + +static bool sss_chip_node_exist(struct sss_pci_adapter *adapter, + unsigned char bus_id) +{ + struct sss_card_node *chip_node = NULL; + + sss_chip_node_lock(); + if (bus_id != 0) { + list_for_each_entry(chip_node, &g_chip_list, node) { + if (chip_node->bus_id == bus_id) { + adapter->chip_node = chip_node; + sss_chip_node_unlock(); + return true; + } + } + } else if (SSS_IS_VF_DEV(adapter->pcidev) || + SSS_IS_SPU_DEV(adapter->pcidev)) { + list_for_each_entry(chip_node, 
&g_chip_list, node) { + if (chip_node) { + adapter->chip_node = chip_node; + sss_chip_node_unlock(); + return true; + } + } + } + sss_chip_node_unlock(); + + return false; +} + +static unsigned char sss_get_pci_bus_id(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pf_pdev = NULL; + unsigned char bus_id = 0; + + if (!pci_is_root_bus(adapter->pcidev->bus)) + bus_id = adapter->pcidev->bus->number; + + if (bus_id == 0) + return bus_id; + + if (adapter->pcidev->is_virtfn) { + pf_pdev = adapter->pcidev->physfn; + bus_id = pf_pdev->bus->number; + } + + return bus_id; +} + +static bool sss_alloc_card_id(u8 *id) +{ + unsigned char i; + + sss_chip_node_lock(); + for (i = 0; i < SSS_CARD_CNT_MAX; i++) { + if (test_and_set_bit(i, &g_index_bit_map) == 0) { + sss_chip_node_unlock(); + *id = i; + return true; + } + } + sss_chip_node_unlock(); + + return false; +} + +static void sss_free_card_id(u8 id) +{ + clear_bit(id, &g_index_bit_map); +} + +int sss_alloc_chip_node(struct sss_pci_adapter *adapter) +{ + struct sss_card_node *chip_node = NULL; + unsigned char card_id; + unsigned char bus_id; + + bus_id = sss_get_pci_bus_id(adapter); + + if (sss_chip_node_exist(adapter, bus_id)) + return 0; + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) + return -ENOMEM; + + chip_node->bus_id = bus_id; + + if (!sss_alloc_card_id(&card_id)) { + kfree(chip_node); + sdk_err(&adapter->pcidev->dev, "Fail to alloc card id, chip node number exceeds limit\n"); + return -EINVAL; + } + + if (snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SSS_CHIP_NAME, card_id) < 0) { + sss_free_card_id(card_id); + kfree(chip_node); + return -EINVAL; + } + + INIT_LIST_HEAD(&chip_node->func_list); + sss_chip_node_lock(); + list_add_tail(&chip_node->node, &g_chip_list); + sss_chip_node_unlock(); + adapter->chip_node = chip_node; + sdk_info(&adapter->pcidev->dev, + "Success to add new chip %s to global list\n", chip_node->chip_name); + + return 0; +} + +void sss_free_chip_node(struct sss_pci_adapter *adapter) +{ + struct sss_card_node *chip_node = adapter->chip_node; + int id; + int ret; + + sss_chip_node_lock(); + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&adapter->pcidev->dev, + "Success to delete chip %s from global list\n", + chip_node->chip_name); + ret = sscanf(chip_node->chip_name, SSS_CHIP_NAME "%d", &id); + if (ret < 0) + sdk_err(&adapter->pcidev->dev, "Fail to get chip id\n"); + + sss_free_card_id(id); + kfree(chip_node); + } + sss_chip_node_unlock(); +} + +void sss_add_func_list(struct sss_pci_adapter *adapter) +{ + sss_chip_node_lock(); + list_add_tail(&adapter->node, &adapter->chip_node->func_list); + sss_chip_node_unlock(); +} + +void sss_del_func_list(struct sss_pci_adapter *adapter) +{ + sss_chip_node_lock(); + list_del(&adapter->node); + sss_chip_node_unlock(); +} + +static struct sss_card_node *sss_get_chip_node_by_hwdev(const void *hwdev) +{ + struct sss_card_node *chip_node = NULL; + struct sss_card_node *node_tmp = NULL; + struct sss_pci_adapter *dev = NULL; + + if (!hwdev) + return NULL; + + sss_hold_chip_node(); + + list_for_each_entry(node_tmp, &g_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + sss_put_chip_node(); + + return chip_node; +} + +static bool sss_is_func_valid(struct sss_pci_adapter *dev) +{ + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + return false; + + return true; +} + +static int sss_get_dynamic_uld_dev_name(struct 
sss_pci_adapter *dev, enum sss_service_type type, + char *ifname) +{ + u32 out_size = IFNAMSIZ; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + if (!uld_info[type].ioctl) + return -EFAULT; + + return uld_info[type].ioctl(dev->uld_dev[type], SSS_TOOL_GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static bool sss_support_service_type(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return !dev->mgmt_info->svc_cap.chip_svc_type; +} + +void sss_get_card_info(const void *hwdev, void *bufin) +{ + struct sss_card_node *chip_node = NULL; + struct sss_tool_card_info *info = (struct sss_tool_card_info *)bufin; + struct sss_pci_adapter *dev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + + info->pf_num = 0; + + chip_node = sss_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + sss_hold_chip_node(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!sss_is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (sss_support_nic(fun_hwdev)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_NIC]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_NIC); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, + info->pf[i].name); + } + } + + if (sss_support_ppa(fun_hwdev, NULL)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_PPA]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_PPA); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_PPA, + info->pf[i].name); + } + } + + if (sss_support_service_type(fun_hwdev)) + strscpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + strscpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + + sss_put_chip_node(); +} + +bool sss_is_in_host(void) +{ + struct sss_card_node *node = NULL; + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + list_for_each_entry(adapter, &node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_put_chip_node(); + return true; + } + } + } + sss_put_chip_node(); + + return false; +} + +void sss_get_all_chip_id(void *id_info) +{ + int i = 0; + int id; + int ret; + struct sss_card_id *card_id = (struct sss_card_id *)id_info; + struct sss_card_node *node = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + ret = sscanf(node->chip_name, SSS_CHIP_NAME "%d", &id); + if (ret < 0) { + pr_err("Fail to get chip id\n"); + continue; + } + card_id->id[i] = (u32)id; + i++; + } + sss_put_chip_node(); + + card_id->num = (u32)i; +} + +void *sss_get_pcidev_hdl(void *hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (!hwdev) + return NULL; + + return dev->pcidev_hdl; +} + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev) +{ + struct sss_pci_adapter *adapter = pci_get_drvdata(hal_dev->pdev); + + return adapter->chip_node; +} + +void sss_get_card_func_info(const char *chip_name, struct sss_card_func_info *card_func) +{ + struct sss_card_node *card_node = NULL; + struct sss_pci_adapter *adapter = NULL; + struct sss_func_pdev_info *info = NULL; + + card_func->pf_num = 0; + + sss_hold_chip_node(); + + list_for_each_entry(card_node, &g_chip_list, node) { + if (strncmp(card_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + info = &card_func->pdev_info[card_func->pf_num]; + info->bar1_size = + 
pci_resource_len(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + info->bar1_pa = + pci_resource_start(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + + info->bar3_size = + pci_resource_len(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + info->bar3_pa = + pci_resource_start(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + + card_func->pf_num++; + if (card_func->pf_num >= SSS_PF_NUM_MAX) { + sss_put_chip_node(); + return; + } + } + } + + sss_put_chip_node(); +} + +int sss_get_pf_id(struct sss_card_node *card_node, u32 port_id, u32 *pf_id, u32 *valid) +{ + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(adapter->hwdev) == port_id) { + *pf_id = sss_get_func_id(adapter->hwdev); + *valid = 1; + break; + } + } + sss_put_chip_node(); + + return 0; +} + +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *dev = NULL; + void *uld = NULL; + + if (!hal_dev) + return NULL; + + dev = pci_get_drvdata(hal_dev->pdev); + if (!dev) + return NULL; + + spin_lock_bh(&dev->uld_lock); + if (!dev->uld_dev[type] || !test_bit(type, &dev->uld_attach_state)) { + spin_unlock_bh(&dev->uld_lock); + return NULL; + } + uld = dev->uld_dev[type]; + + atomic_inc(&dev->uld_ref_cnt[type]); + spin_unlock_bh(&dev->uld_lock); + + return uld; +} + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(hal_dev->pdev); + + atomic_dec(&pci_adapter->uld_ref_cnt[type]); +} + +static bool sss_is_pcidev_match_dev_name(const char *dev_name, struct sss_pci_adapter *dev, + enum sss_service_type type) +{ + enum sss_service_type i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type > SSS_SERVICE_TYPE_MAX) + return false; + + if (type == SSS_SERVICE_TYPE_MAX) { + for (i = SSS_SERVICE_TYPE_OVS; i < SSS_SERVICE_TYPE_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ)) + return true; + } + + err = sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, (char *)nic_uld_name); + if (err == 0) { + if (!strncmp(nic_uld_name, dev_name, IFNAMSIZ)) + return true; + } + + return false; +} + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_is_pcidev_match_dev_name(dev_name, dev, type)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + return &dev->hal_dev; + } + } + } + + sss_put_chip_node(); + + return NULL; +} + +static bool sss_is_pcidev_match_chip_name(const char *ifname, struct sss_pci_adapter *dev, + struct sss_card_node *chip_node, enum sss_func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (sss_get_func_type(dev->hwdev) != type) + return false; + return true; + } + + return false; +} + +static struct sss_hal_dev *sss_get_dst_type_lld_dev_by_chip_name(const char *ifname, + enum sss_func_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if 
(sss_is_pcidev_match_chip_name(ifname, dev, chip_node, type)) + return &dev->hal_dev; + } + } + + return NULL; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name) +{ + struct sss_hal_dev *dev = NULL; + + sss_hold_chip_node(); + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PPF); + if (dev) + goto out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PF); + if (dev) + goto out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_VF); +out: + if (dev) + lld_dev_hold(dev); + sss_put_chip_node(); + + return dev; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(dev->hwdev) == port_id && + !strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + + return &dev->hal_dev; + } + } + } + sss_put_chip_node(); + + return NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h new file mode 100644 index 00000000000000..65f51017833183 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_MGMT_H +#define SSS_ADAPTER_MGMT_H + +#include +#include + +#include "sss_version.h" +#include "sss_adapter.h" + +#define SSS_DRV_VERSION SSS_VERSION_STR + +#define SSS_DRV_NAME "sssnic" +#define SSS_CHIP_NAME "sssnic" + +#define SSS_VF_PCI_CFG_REG_BAR 0 +#define SSS_PF_PCI_CFG_REG_BAR 1 + +#define SSS_PCI_INTR_REG_BAR 2 +#define SSS_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */ +#define SSS_PCI_DB_BAR 4 + +#define SSS_IS_VF_DEV(pdev) ((pdev)->device == SSS_DEV_ID_VF) + +#define SSS_CARD_MAX_SIZE (64) + +struct sss_card_id { + u32 id[SSS_CARD_MAX_SIZE]; + u32 num; +}; + +struct sss_func_pdev_info { + u64 bar0_pa; + u64 bar0_size; + u64 bar1_pa; + u64 bar1_size; + u64 bar3_pa; + u64 bar3_size; + u64 rsvd[4]; +}; + +struct sss_card_func_info { + u32 pf_num; + u32 rsvd; + u64 usr_adm_pa; + struct sss_func_pdev_info pdev_info[SSS_CARD_MAX_SIZE]; +}; + +enum { + SSS_NO_PROBE = 1, + SSS_PROBE_START = 2, + SSS_PROBE_OK = 3, + SSS_IN_REMOVE = 4, +}; + +struct list_head *sss_get_chip_list(void); +int sss_alloc_chip_node(struct sss_pci_adapter *adapter); +void sss_free_chip_node(struct sss_pci_adapter *adapter); +void sss_pre_init(void); +struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev); +void sss_add_func_list(struct sss_pci_adapter *adapter); +void sss_del_func_list(struct sss_pci_adapter *adapter); +void sss_hold_chip_node(void); +void sss_put_chip_node(void); + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state); + +void lld_dev_hold(struct sss_hal_dev *dev); +void lld_dev_put(struct sss_hal_dev *dev); + +void sss_chip_node_lock(void); +void sss_chip_node_unlock(void); + +void *sss_get_pcidev_hdl(void *hwdev); +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, 
enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); + +void sss_get_all_chip_id(void *id_info); + +void sss_get_card_func_info + (const char *chip_name, struct sss_card_func_info *card_func); + +void sss_get_card_info(const void *hwdev, void *bufin); + +bool sss_is_in_host(void); + +int sss_get_pf_id(struct sss_card_node *chip_node, u32 port_id, u32 *pf_id, u32 *valid); + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c new file mode 100644 index 00000000000000..452795f7bcb5b3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" + +#define SSS_MIN_SLEEP_TIME(us) ((us) - (us) / 10) + +/* Sleep more than 20ms using msleep is accurate */ +#define SSS_HANDLER_SLEEP(usleep_min, wait_once_us) \ +do { \ + if ((wait_once_us) >= 20 * USEC_PER_MSEC) \ + msleep((wait_once_us) / USEC_PER_MSEC); \ + else \ + usleep_range((usleep_min), (wait_once_us)); \ +} while (0) + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, struct sss_dma_addr_align *addr) +{ + dma_addr_t pa; + dma_addr_t pa_align; + void *va = NULL; + void *va_align = NULL; + + va = dma_zalloc_coherent(dev_hdl, size, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + if (pa_align == pa) { + va_align = va; + goto same_addr_after_align; + } + + dma_free_coherent(dev_hdl, size, va, pa); + + va = dma_zalloc_coherent(dev_hdl, size + align, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + va_align = (void *)((u64)va + (pa_align - pa)); + +same_addr_after_align: + addr->origin_paddr = pa; + addr->align_paddr = pa_align; + addr->origin_vaddr = va; + addr->align_vaddr = va_align; + addr->real_size = (u32)size; + + return 0; +} + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *addr) +{ + dma_free_coherent(dev_hdl, addr->real_size, addr->origin_vaddr, addr->origin_paddr); +} + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum sss_process_ret ret; + unsigned long end; + u32 usleep_min = SSS_MIN_SLEEP_TIME(wait_once_us); + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + SSS_HANDLER_SLEEP(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); + + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h new file mode 100644 index 00000000000000..36988f134d964d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_COMMON_H +#define SSS_COMMON_H + +#include + +#include "sss_hw_common.h" + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + 
unsigned int flag, struct sss_dma_addr_align *mem_align); + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *mem_align); + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c new file mode 100644 index 00000000000000..7c81f4bee2f4b1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_version.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_pci_probe.h" +#include "sss_pci_remove.h" +#include "sss_pci_shutdown.h" +#include "sss_pci_error.h" +#include "sss_hwdev.h" + +#define SSS_DRV_DESC "Intelligent Network Interface Card Driver" + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSS_DRV_VERSION); +MODULE_LICENSE("GPL"); + +static const struct pci_device_id g_pci_table[] = { + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_STANDARD), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_SPN120), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_VF), 0}, + {0, 0} +}; + +MODULE_DEVICE_TABLE(pci, g_pci_table); + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh g_pci_driver_rh = { + .sriov_configure = sss_pci_configure_sriov, +}; +#endif + +static struct pci_error_handlers g_pci_err_handler = { + .error_detected = sss_detect_pci_error, +}; + +static struct pci_driver g_pci_driver = { + .name = SSS_DRV_NAME, + .id_table = g_pci_table, + .probe = sss_pci_probe, + .remove = sss_pci_remove, + .shutdown = sss_pci_shutdown, +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = sss_pci_configure_sriov, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &g_pci_driver_rh, +#endif + .err_handler = &g_pci_err_handler +}; + +int sss_init_pci(void) +{ + int ret; + + pr_info("%s - version %s\n", SSS_DRV_DESC, SSS_DRV_VERSION); + sss_pre_init(); + + ret = pci_register_driver(&g_pci_driver); + if (ret != 0) + return ret; + + return 0; +} + +void sss_exit_pci(void) +{ + pci_unregister_driver(&g_pci_driver); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c new file mode 100644 index 00000000000000..c825864805f31d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime) +{ + int ret; + struct sss_cmd_sync_time cmd_time = {0}; + u16 out_len = sizeof(cmd_time); + + cmd_time.mstime = mstime; + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SYNC_TIME, &cmd_time, + sizeof(cmd_time), &cmd_time, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_time)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to sync time, ret: %d, 
status: 0x%x, out_len: 0x%x\n", + ret, cmd_time.head.state, out_len); + return -EIO; + } + + return 0; +} + +void sss_chip_disable_mgmt_channel(void *hwdev) +{ + sss_chip_set_pf_status(SSS_TO_HWIF(hwdev), SSS_PF_STATUS_INIT); +} + +int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info) +{ + int ret; + struct sss_cmd_board_info cmd_info = {0}; + u16 out_len = sizeof(cmd_info); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + &cmd_info, sizeof(cmd_info), &cmd_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_info)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get board info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_info.head.state, out_len); + return -EIO; + } + + memcpy(board_info, &cmd_info.info, sizeof(*board_info)); + + return 0; +} + +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num) +{ + int ret; + struct sss_cmd_feature_nego cmd_feature = {0}; + u16 out_len = sizeof(cmd_feature); + + cmd_feature.func_id = sss_get_global_func_id(hwdev); + cmd_feature.opcode = opcode; + if (opcode == SSS_MGMT_MSG_SET_CMD) + memcpy(cmd_feature.feature, feature, (feature_num * sizeof(u64))); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_FEATURE_NEGO, + &cmd_feature, sizeof(cmd_feature), &cmd_feature, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to nego feature, opcode: %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_feature.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) + memcpy(feature, cmd_feature.feature, (feature_num * sizeof(u64))); + + return 0; +} + +int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id) +{ + int ret; + struct sss_cmd_bdf_info cmd_bdf = {0}; + u16 out_len = sizeof(cmd_bdf); + + cmd_bdf.bus = bus_id; + cmd_bdf.device = device_id; + cmd_bdf.function = func_id; + cmd_bdf.function_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + &cmd_bdf, sizeof(cmd_bdf), &cmd_bdf, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bdf)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set bdf info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_bdf.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_channel_detect cmd_detect = {0}; + u16 out_len = sizeof(cmd_detect); + + if (!hwdev) + return -EINVAL; + + cmd_detect.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + &cmd_detect, sizeof(cmd_detect), &cmd_detect, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_detect)) { + sdk_err(hwdev->dev_hdl, + "Fail to send channel detect, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, cmd_detect.head.state, out_len); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h new file mode 100644 index 00000000000000..d0471e8a9514db --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_API_H +#define SSS_HWDEV_API_H + +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hwdev.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime); +int sss_chip_get_board_info(void *hwdev, struct 
sss_board_info *board_info); +void sss_chip_disable_mgmt_channel(void *hwdev); +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num); +int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id); +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c new file mode 100644 index 00000000000000..412cc574a563d5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwdev_cap.h" + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* L2NIC */ +#define SSS_CFG_MAX_QP 256 + +/* RDMA */ +#define SSS_RDMA_RSVD_QP 2 +#define SSS_ROCE_MAX_WQE (8 * K_UNIT - 1) + +#define SSS_RDMA_MAX_SQ_SGE 16 + +#define SSS_ROCE_MAX_RQ_SGE 16 + +#define SSS_RDMA_MAX_SQ_DESC_SIZE 256 + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */ +#define SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE 192 + +#define SSS_ROCE_MAX_RQ_DESC_SIZE 256 + +#define SSS_ROCE_QPC_ENTRY_SIZE 512 + +#define SSS_WQEBB_SIZE 64 + +#define SSS_ROCE_RDMARC_ENTRY_SIZE 32 +#define SSS_ROCE_MAX_QP_INIT_RDMA 128 +#define SSS_ROCE_MAX_QP_DEST_RDMA 128 + +#define SSS_ROCE_MAX_SRQ_WQE (16 * K_UNIT - 1) +#define SSS_ROCE_RSVD_SRQ 0 +#define SSS_ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SIZE 64 + +#define SSS_ROCE_MAX_SRQ 0x400 +#define SSS_ROCE_MAX_CQ 0x800 +#define SSS_ROCE_MAX_QP 0x400 +#define SSS_ROCE_MAX_MPT 0x400 +#define SSS_ROCE_MAX_DRC_QP 0x40 + +#define SSS_RDMA_MAX_CQE (8 * M_UNIT - 1) +#define SSS_RDMA_RSVD_CQ 0 + +#define SSS_RDMA_CQC_ENTRY_SIZE 128 + +#define SSS_RDMA_CQE_SIZE 64 +#define SSS_RDMA_RSVD_MRW 128 +#define SSS_RDMA_MPT_ENTRY_SIZE 64 +#define SSS_RDMA_MTT_NUM (1 * G_UNIT) +#define SSS_LOG_MTT_SEG 5 +#define SSS_MTT_ENTRY_SIZE 8 +#define SSS_LOG_RDMARC_SEG 3 + +#define SSS_LOCAL_ACK_DELAY 15 +#define SSS_RDMA_PORT_NUM 1 +#define SSS_ROCE_MAX_MSG_SIZE (2 * G_UNIT) + +#define SSS_DB_PAGE_SIZE_K (4 * K_UNIT) +#define SSS_DWQE_SIZE 256 + +#define SSS_PD_NUM (128 * K_UNIT) +#define SSS_RSVD_PD 0 + +#define SSS_MAX_XRCD (64 * K_UNIT) +#define SSS_RSVD_XRCD 0 + +#define SSS_MAX_GID_PER_PORT 128 +#define SSS_GID_ENTRY_SIZE 32 +#define SSS_RSVD_LKEY ((SSS_RDMA_RSVD_MRW - 1) << 8) +#define SSS_PAGE_SIZE_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define SSS_ROCE_MODE 1 + +#define SSS_MAX_FRPL_LEN 511 +#define SSS_MAX_PKEY 1 + +/* ToE */ +#define SSS_TOE_PCTX_SIZE 1024 +#define SSS_TOE_SCQC_SIZE 64 + +/* FC */ +#define SSS_FC_PQPC_SIZE 256 +#define SSS_FC_CQPC_SIZE 256 +#define SSS_FC_SQE_SIZE 128 +#define SSS_FC_SCQC_SIZE 64 +#define SSS_FC_SCQE_SIZE 64 +#define SSS_FC_SRQC_SIZE 64 +#define SSS_FC_SRQE_SIZE 32 + +/* OVS */ +#define SSS_OVS_PCTX_SIZE 512 + +/* PPA */ +#define SSS_PPA_PCTX_SIZE 512 + +/* IPsec */ +#define SSS_IPSEC_SACTX_SIZE 512 + +/* VirtIO */ +#define SSS_VIRTIO_BASE_VQ_SIZE 2048U +#define SSS_VIRTIO_DEFAULT_VQ_SIZE 8192U + +struct sss_cmd_dev_cap_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_function; + u8 pf_num; + u8 
pf_id_start; + u16 vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_type; + u16 max_vf; + u8 flexq_en; + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u8 rsvd_func1; + u32 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u8 lb_mode; + u8 smf_pg; + + u32 max_connect_num; + u16 max_stick2cache_num; + u16 bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + u8 host_sf_en; + u8 master_host_id; + u8 srv_multi_host_mode; + u8 rsvd2_sr; + + u32 rsvd_func3[5]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_def_queue_num; + u16 rsvd_nic1; + u32 rsvd_nic2[2]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 rsvd_roce1[3]; + u32 rsvd_roce2[5]; + + /* IPsec */ + u32 ipsec_max_sactx; + u16 ipsec_max_cq; + u16 rsvd_ipsec1; + u32 rsvd_ipsec2[2]; + + /* OVS */ + u32 ovs_max_qpc; + u32 rsvd_ovs[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 toe_max_cctxt; + u32 rsvd_toe[2]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 rsvd_fc2[5]; + + /* VBS */ + u16 vbs_max_volq; + u16 rsvd_vbs1; + u32 rsvd_vbs2[3]; + + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + u32 rsvd_glb[8]; +}; + +enum { + SSS_SF_SVC_FT_BIT = (1 << 0), + SSS_SF_SVC_RDMA_BIT = (1 << 1), +}; + +enum sss_cfg_cmd { + SSS_CFG_CMD_GET_CAP_CFG = 0, + SSS_CFG_CMD_GET_HOST_TIMER = 1, +}; + +static void sss_print_pubic_cap(void *dev_hdl, const struct sss_service_cap *svc_cap) +{ + sdk_info(dev_hdl, + "Get public capbility: svc_type: 0x%x, chip_svc_type: 0x%x\n", + svc_cap->svc_type, svc_cap->chip_svc_type); + sdk_info(dev_hdl, + "host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n", + svc_cap->host_id, svc_cap->ep_id, svc_cap->er_id, svc_cap->port_id); + sdk_info(dev_hdl, + "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + svc_cap->host_total_function, svc_cap->host_oq_id_mask_val, svc_cap->max_vf); + sdk_info(dev_hdl, + "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x\n", + svc_cap->pf_num, svc_cap->pf_id_start, svc_cap->vf_num, svc_cap->vf_id_start); + sdk_info(dev_hdl, + "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", + svc_cap->host_valid_bitmap, svc_cap->master_host_id, svc_cap->srv_multi_host_mode); + sdk_info(dev_hdl, + "cos_valid_bitmap: 0x%x, port_cos_valid_bitmap: 0x%x, flexq_en: 0x%x, virtio_vq_size: 0x%x\n", + svc_cap->cos_valid_bitmap, svc_cap->port_cos_valid_bitmap, svc_cap->flexq_en, + svc_cap->virtio_vq_size); + sdk_info(dev_hdl, + "pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x, pseudo_vf_max_pctx: 0x%x\n", + svc_cap->pseudo_vf_start_id, svc_cap->pseudo_vf_num, svc_cap->pseudo_vf_max_pctx); + sdk_info(dev_hdl, + "pseudo_vf_bfilter_start_addr: 0x%x, pseudo_vf_bfilter_len: 0x%x\n", + svc_cap->pseudo_vf_bfilter_start_addr, svc_cap->pseudo_vf_bfilter_len); +} + +static void sss_parse_qmm_cap(struct sss_hwdev *hwdev, + struct sss_service_cap 
*svc_cap, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + struct sss_dev_sf_svc_attr *sf_svc_attr = &svc_cap->sf_svc_attr; + + svc_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_cfg_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + svc_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + svc_cap->pseudo_vf_bfilter_start_addr = cmd_cap->pseudo_vf_bfilter_start_addr; + svc_cap->pseudo_vf_bfilter_len = cmd_cap->pseudo_vf_bfilter_len; + + if (SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev)) + svc_cap->virtio_vq_size = (u16)(SSS_VIRTIO_BASE_VQ_SIZE << svc_cap->virtio_vq_size); + else + svc_cap->virtio_vq_size = SSS_VIRTIO_DEFAULT_VQ_SIZE; + + sf_svc_attr->rdma_en = !!(cmd_cap->sf_svc_attr & SSS_SF_SVC_RDMA_BIT); + + svc_cap->smf_pg = cmd_cap->smf_pg; + svc_cap->lb_mode = cmd_cap->lb_mode; + + svc_cap->timer_en = cmd_cap->timer_en; + svc_cap->bfilter_start_addr = cmd_cap->bfilter_start_addr; + svc_cap->bfilter_len = cmd_cap->bfilter_len; + svc_cap->host_oq_id_mask_val = cmd_cap->host_oq_id_mask_val; + svc_cap->hash_bucket_num = cmd_cap->hash_bucket_num; + svc_cap->max_stick2cache_num = cmd_cap->max_stick2cache_num; + svc_cap->max_connect_num = cmd_cap->max_connect_num; +} + +static void sss_parse_pubic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + svc_cap->svc_type = cmd_cap->svc_type; + svc_cap->chip_svc_type = cmd_cap->svc_type; + + svc_cap->ep_id = cmd_cap->ep_id; + svc_cap->er_id = cmd_cap->er_id; + svc_cap->host_id = cmd_cap->host_id; + svc_cap->port_id = cmd_cap->port_id; + + svc_cap->host_total_function = cmd_cap->host_total_function; + svc_cap->host_valid_bitmap = cmd_cap->host_valid_bitmap; + svc_cap->master_host_id = cmd_cap->master_host_id; + svc_cap->srv_multi_host_mode = cmd_cap->srv_multi_host_mode; + + svc_cap->flexq_en = cmd_cap->flexq_en; + svc_cap->cos_valid_bitmap = cmd_cap->cos_valid_bitmap; + svc_cap->port_cos_valid_bitmap = cmd_cap->port_cos_valid_bitmap; + + if (type != SSS_FUNC_TYPE_VF) { + svc_cap->pf_num = cmd_cap->pf_num; + svc_cap->pf_id_start = cmd_cap->pf_id_start; + svc_cap->vf_num = cmd_cap->vf_num; + svc_cap->vf_id_start = cmd_cap->vf_id_start; + svc_cap->max_vf = cmd_cap->max_vf; + } else { + svc_cap->max_vf = 0; + } + + svc_cap->sf_en = (type == SSS_FUNC_TYPE_PPF) ? 
+ (!!cmd_cap->host_sf_en) : (!!cmd_cap->func_sf_en); + + sss_parse_qmm_cap(hwdev, svc_cap, cmd_cap); + sss_print_pubic_cap(hwdev->dev_hdl, svc_cap); +} + +static void sss_parse_l2nic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_nic_service_cap *nic_svc_cap = &svc_cap->nic_cap; + + if (!SSS_IS_NIC_TYPE(hwdev)) + return; + + nic_svc_cap->max_rq = cmd_cap->nic_max_rq_id + 1; + nic_svc_cap->max_sq = cmd_cap->nic_max_sq_id + 1; + nic_svc_cap->def_queue_num = cmd_cap->nic_def_queue_num; + + sdk_info(hwdev->dev_hdl, + "Get Nic capability, max_sq: 0x%x, max_rq: 0x%x, def_queue_num: 0x%x\n", + nic_svc_cap->max_sq, nic_svc_cap->max_rq, nic_svc_cap->def_queue_num); + + /* Check parameters from firmware */ + if (nic_svc_cap->max_sq > SSS_CFG_MAX_QP || + nic_svc_cap->max_rq > SSS_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Exceed limit [1-%d], sq: %u, rq: %u\n", + SSS_CFG_MAX_QP, nic_svc_cap->max_sq, nic_svc_cap->max_rq); + nic_svc_cap->max_rq = SSS_CFG_MAX_QP; + nic_svc_cap->max_sq = SSS_CFG_MAX_QP; + } +} + +static void sss_parse_fc_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_fc_service_cap *fc_svc_cap = &svc_cap->fc_cap; + struct sss_dev_fc_svc_cap *dev_fc_cap = &fc_svc_cap->dev_fc_cap; + + if (!SSS_IS_FC_TYPE(hwdev)) + return; + + /* FC without virtualization */ + if (type != SSS_FUNC_TYPE_PF && type != SSS_FUNC_TYPE_PPF) + return; + + dev_fc_cap->srq_num = cmd_cap->fc_max_srq; + dev_fc_cap->scq_num = cmd_cap->fc_max_scq; + dev_fc_cap->max_parent_qpc_num = cmd_cap->fc_max_pctx; + dev_fc_cap->max_child_qpc_num = cmd_cap->fc_max_cctx; + dev_fc_cap->child_qpc_id_start = cmd_cap->fc_cctx_id_start; + dev_fc_cap->vp_id_start = cmd_cap->fc_vp_id_start; + dev_fc_cap->vp_id_end = cmd_cap->fc_vp_id_end; + + fc_svc_cap->parent_qpc_size = SSS_FC_PQPC_SIZE; + fc_svc_cap->child_qpc_size = SSS_FC_CQPC_SIZE; + fc_svc_cap->sqe_size = SSS_FC_SQE_SIZE; + + fc_svc_cap->scqc_size = SSS_FC_SCQC_SIZE; + fc_svc_cap->scqe_size = SSS_FC_SCQE_SIZE; + + fc_svc_cap->srqc_size = SSS_FC_SRQC_SIZE; + fc_svc_cap->srqe_size = SSS_FC_SRQE_SIZE; + + sdk_info(hwdev->dev_hdl, "Get FC capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_parent_qpc_num: 0x%x, max_child_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x\n", + dev_fc_cap->max_parent_qpc_num, dev_fc_cap->max_child_qpc_num, + dev_fc_cap->scq_num, dev_fc_cap->srq_num); + sdk_info(hwdev->dev_hdl, "child_qpc_id_start: 0x%x, vp_id_start: 0x%x, vp_id_end: 0x%x\n", + dev_fc_cap->child_qpc_id_start, dev_fc_cap->vp_id_start, dev_fc_cap->vp_id_end); +} + +static void sss_init_rdma_cap_param(struct sss_hwdev *hwdev) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &hwdev->mgmt_info->svc_cap.rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_rdmarc = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->reserved_qp = SSS_RDMA_RSVD_QP; + rdma_svc_cap->max_sq_sg = SSS_RDMA_MAX_SQ_SGE; + + /* RoCE */ + roce_own_cap->qpc_entry_size = SSS_ROCE_QPC_ENTRY_SIZE; + roce_own_cap->max_wqe = SSS_ROCE_MAX_WQE; + roce_own_cap->max_rq_sg = SSS_ROCE_MAX_RQ_SGE; + roce_own_cap->max_sq_inline_data_size = SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE; + roce_own_cap->max_rq_desc_size = SSS_ROCE_MAX_RQ_DESC_SIZE; + roce_own_cap->rdmarc_entry_size = SSS_ROCE_RDMARC_ENTRY_SIZE; + roce_own_cap->max_qp_init_rdma = 
SSS_ROCE_MAX_QP_INIT_RDMA; + roce_own_cap->max_qp_dest_rdma = SSS_ROCE_MAX_QP_DEST_RDMA; + roce_own_cap->max_srq_wqe = SSS_ROCE_MAX_SRQ_WQE; + roce_own_cap->reserved_srq = SSS_ROCE_RSVD_SRQ; + roce_own_cap->max_srq_sge = SSS_ROCE_MAX_SRQ_SGE; + roce_own_cap->srqc_entry_size = ROCE_SRQC_ENTERY_SIZE; + roce_own_cap->max_msg_size = SSS_ROCE_MAX_MSG_SIZE; + + rdma_svc_cap->max_sq_desc_size = SSS_RDMA_MAX_SQ_DESC_SIZE; + rdma_svc_cap->wqebb_size = SSS_WQEBB_SIZE; + rdma_svc_cap->max_cqe = SSS_RDMA_MAX_CQE; + rdma_svc_cap->reserved_cq = SSS_RDMA_RSVD_CQ; + rdma_svc_cap->cqc_entry_size = SSS_RDMA_CQC_ENTRY_SIZE; + rdma_svc_cap->cqe_size = SSS_RDMA_CQE_SIZE; + rdma_svc_cap->reserved_mrw = SSS_RDMA_RSVD_MRW; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + + rdma_svc_cap->max_fmr_map = 0xff; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->log_rdmarc_seg = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->local_ca_ack_delay = SSS_LOCAL_ACK_DELAY; + rdma_svc_cap->port_num = SSS_RDMA_PORT_NUM; + rdma_svc_cap->db_page_size = SSS_DB_PAGE_SIZE_K; + rdma_svc_cap->direct_wqe_size = SSS_DWQE_SIZE; + rdma_svc_cap->pd_num = SSS_PD_NUM; + rdma_svc_cap->reserved_pd = SSS_RSVD_PD; + rdma_svc_cap->max_xrcd = SSS_MAX_XRCD; + rdma_svc_cap->reserved_xrcd = SSS_RSVD_XRCD; + rdma_svc_cap->max_gid_per_port = SSS_MAX_GID_PER_PORT; + rdma_svc_cap->gid_entry_size = SSS_GID_ENTRY_SIZE; + rdma_svc_cap->reserved_lkey = SSS_RSVD_LKEY; + rdma_svc_cap->comp_vector_num = (u32)hwdev->mgmt_info->eq_info.ceq_num; + rdma_svc_cap->page_size_cap = SSS_PAGE_SIZE_CAP; + rdma_svc_cap->flag = (SSS_RDMA_BMME_FLAG_LOCAL_INV | + SSS_RDMA_BMME_FLAG_REMOTE_INV | + SSS_RDMA_BMME_FLAG_FAST_REG_WR | + SSS_RDMA_DEV_CAP_FLAG_XRC | + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW | + SSS_RDMA_BMME_FLAG_TYPE_2_WIN | + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B | + SSS_RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_svc_cap->max_frpl_len = SSS_MAX_FRPL_LEN; + rdma_svc_cap->max_pkey = SSS_MAX_PKEY; +} + +static void sss_parse_roce_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &svc_cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_ROCE_TYPE(hwdev)) + return; + + roce_own_cap->max_srq = cmd_cap->roce_max_srq; + roce_own_cap->max_cq = cmd_cap->roce_max_cq; + roce_own_cap->max_qp = cmd_cap->roce_max_qp; + roce_own_cap->max_mpt = cmd_cap->roce_max_mpt; + roce_own_cap->max_drc_qp = cmd_cap->roce_max_drc_qp; + + roce_own_cap->wqe_cl_size = cmd_cap->roce_wqe_cl_size; + roce_own_cap->wqe_cl_start = cmd_cap->roce_wqe_cl_start; + roce_own_cap->wqe_cl_end = cmd_cap->roce_wqe_cl_end; + + if (roce_own_cap->max_qp == 0) { + roce_own_cap->max_drc_qp = SSS_ROCE_MAX_DRC_QP; + if (type == SSS_FUNC_TYPE_PF || type == SSS_FUNC_TYPE_PPF) { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT; + } else { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ / 2; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ / 2; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP / 2; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT / 2; + } + } + + sss_init_rdma_cap_param(hwdev); + + sdk_info(hwdev->dev_hdl, "Get ROCE capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_qps: 0x%x, max_srq: 0x%x, max_cq: 0x%x, max_mpt: 0x%x, max_drct: 0x%x\n", + roce_own_cap->max_qp,
roce_own_cap->max_srq, roce_own_cap->max_cq, + roce_own_cap->max_mpt, roce_own_cap->max_drc_qp); + sdk_info(hwdev->dev_hdl, "wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", + roce_own_cap->wqe_cl_start, roce_own_cap->wqe_cl_end, roce_own_cap->wqe_cl_size); +} + +static void sss_parse_rdma_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &svc_cap->rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_RDMA_ENABLE(hwdev)) + return; + + roce_own_cap->dmtt_cl_start = cmd_cap->roce_dmtt_cl_start; + roce_own_cap->dmtt_cl_end = cmd_cap->roce_dmtt_cl_end; + roce_own_cap->dmtt_cl_size = cmd_cap->roce_dmtt_cl_size; + + roce_own_cap->cmtt_cl_start = cmd_cap->roce_cmtt_cl_start; + roce_own_cap->cmtt_cl_end = cmd_cap->roce_cmtt_cl_end; + roce_own_cap->cmtt_cl_size = cmd_cap->roce_cmtt_cl_size; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + + sdk_info(hwdev->dev_hdl, "Get RDMA capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "cmtt_cl_start: 0x%x, cmtt_cl_end: 0x%x, cmtt_cl_size: 0x%x\n", + roce_own_cap->cmtt_cl_start, roce_own_cap->cmtt_cl_end, + roce_own_cap->cmtt_cl_size); + sdk_info(hwdev->dev_hdl, "dmtt_cl_start: 0x%x, dmtt_cl_end: 0x%x, dmtt_cl_size: 0x%x\n", + roce_own_cap->dmtt_cl_start, roce_own_cap->dmtt_cl_end, + roce_own_cap->dmtt_cl_size); +} + +static void sss_parse_ovs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ovs_service_cap *ovs_cap = &svc_cap->ovs_cap; + struct sss_dev_ovs_svc_cap *dev_ovs_cap = &ovs_cap->dev_ovs_cap; + + if (!SSS_IS_OVS_TYPE(hwdev)) + return; + + dev_ovs_cap->max_pctx = cmd_cap->ovs_max_qpc; + dev_ovs_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + dev_ovs_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + dev_ovs_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + dev_ovs_cap->dynamic_qp_en = cmd_cap->flexq_en; + ovs_cap->pctx_size = SSS_OVS_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get OVS capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_pctxs: 0x%x, pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x\n", + dev_ovs_cap->max_pctx, dev_ovs_cap->pseudo_vf_start_id, + dev_ovs_cap->pseudo_vf_num); + sdk_info(hwdev->dev_hdl, "pseudo_vf_max_pctx: 0x%x, dynamic_qp_en: 0x%x\n", + dev_ovs_cap->pseudo_vf_max_pctx, dev_ovs_cap->dynamic_qp_en); +} + +static void sss_parse_ppa_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ppa_service_cap *ppa_cap = &svc_cap->ppa_cap; + + if (!SSS_IS_PPA_TYPE(hwdev)) + return; + + ppa_cap->qpc_pseudo_vf_start = cmd_cap->pseudo_vf_start_id; + ppa_cap->qpc_pseudo_vf_num = cmd_cap->pseudo_vf_num; + ppa_cap->qpc_pseudo_vf_ctx_num = cmd_cap->pseudo_vf_max_pctx; + ppa_cap->bloomfilter_len = cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->bloomfilter_en = !!cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->pctx_size = SSS_PPA_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get PPA capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "qpc_pseudo_vf_start: 0x%x, qpc_pseudo_vf_num: 0x%x, qpc_pseudo_vf_ctx_num: 0x%x\n", +
ppa_cap->qpc_pseudo_vf_start, ppa_cap->qpc_pseudo_vf_num, + ppa_cap->qpc_pseudo_vf_ctx_num); +} + +static void sss_parse_toe_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_toe_service_cap *toe_svc_cap = &svc_cap->toe_cap; + struct sss_dev_toe_svc_cap *dev_toe_cap = &toe_svc_cap->dev_toe_cap; + + if (!SSS_IS_TOE_TYPE(hwdev)) + return; + + dev_toe_cap->max_srq = cmd_cap->toe_max_srq; + dev_toe_cap->max_cq = cmd_cap->toe_max_cq; + dev_toe_cap->srq_id_start = cmd_cap->toe_srq_id_start; + dev_toe_cap->max_pctx = cmd_cap->toe_max_pctx; + dev_toe_cap->max_cctxt = cmd_cap->toe_max_cctxt; + dev_toe_cap->max_mpt = cmd_cap->toe_max_mpt; + + toe_svc_cap->pctx_size = SSS_TOE_PCTX_SIZE; + toe_svc_cap->scqc_size = SSS_TOE_SCQC_SIZE; + + sdk_info(hwdev->dev_hdl, "Get TOE capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_pctx: 0x%x, max_cq: 0x%x, max_srq: 0x%x, srq_id_start: 0x%x, max_mpt: 0x%x\n", + dev_toe_cap->max_pctx, dev_toe_cap->max_cq, dev_toe_cap->max_srq, + dev_toe_cap->srq_id_start, dev_toe_cap->max_mpt); +} + +static void sss_parse_ipsec_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ipsec_service_cap *ipsec_cap = &svc_cap->ipsec_cap; + struct sss_dev_ipsec_svc_cap *dev_ipsec_cap = &ipsec_cap->dev_ipsec_cap; + + if (!SSS_IS_IPSEC_TYPE(hwdev)) + return; + + dev_ipsec_cap->max_sactx = cmd_cap->ipsec_max_sactx; + dev_ipsec_cap->max_cq = cmd_cap->ipsec_max_cq; + ipsec_cap->sactx_size = SSS_IPSEC_SACTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get IPSEC capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_sactx: 0x%x, max_cq: 0x%x\n", + dev_ipsec_cap->max_sactx, dev_ipsec_cap->max_cq); +} + +static void sss_parse_vbs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_vbs_service_cap *vbs_cap = &svc_cap->vbs_cap; + + if (!SSS_IS_VBS_TYPE(hwdev)) + return; + + vbs_cap->vbs_max_volq = cmd_cap->vbs_max_volq; + + sdk_info(hwdev->dev_hdl, "Get VBS capability, type: 0x%x, vbs_max_volq: 0x%x\n", + type, vbs_cap->vbs_max_volq); +} + +static void sss_parse_dev_cap(struct sss_hwdev *hwdev, + struct sss_cmd_dev_cap_cfg *cmd_cap, enum sss_func_type type) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + + sss_parse_pubic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_l2nic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_fc_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_toe_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_rdma_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_roce_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ovs_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ipsec_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ppa_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_vbs_cap(hwdev, svc_cap, cmd_cap, type); +} + +static int sss_chip_get_cap(struct sss_hwdev *hwdev, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + int ret; + u16 out_len = sizeof(*cmd_cap); + + cmd_cap->func_id = sss_get_global_func_id(hwdev); + sdk_info(hwdev->dev_hdl, "Get svc_cap, func_id: %u\n", cmd_cap->func_id); + + ret = sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_CFGM, SSS_CFG_CMD_GET_CAP_CFG, + cmd_cap, sizeof(*cmd_cap), cmd_cap, &out_len, 0, + SSS_CHANNEL_COMM); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_cap)) { + sdk_err(hwdev->dev_hdl, + "Fail to get capability, err: %d, status: 0x%x, out_len: 
0x%x\n", + ret, cmd_cap->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_init_capability(struct sss_hwdev *hwdev) +{ + int ret; + enum sss_func_type type = SSS_GET_FUNC_TYPE(hwdev); + struct sss_cmd_dev_cap_cfg cmd_cap = {0}; + + if (type != SSS_FUNC_TYPE_PF && + type != SSS_FUNC_TYPE_VF && + type != SSS_FUNC_TYPE_PPF) { + sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", type); + return -EINVAL; + } + + ret = sss_chip_get_cap(hwdev, &cmd_cap); + if (ret != 0) + return ret; + + sss_parse_dev_cap(hwdev, &cmd_cap, type); + + sdk_info(hwdev->dev_hdl, "Success to init capability\n"); + return 0; +} + +void sss_deinit_capability(struct sss_hwdev *hwdev) +{ + sdk_info(hwdev->dev_hdl, "Success to deinit capability"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h new file mode 100644 index 00000000000000..fa4a8809e1fd58 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_CAP_H +#define SSS_HWDEV_CAP_H + +#include "sss_hwdev.h" + +int sss_init_capability(struct sss_hwdev *dev); +void sss_deinit_capability(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c new file mode 100644 index 00000000000000..0469392468273f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hw_svc_cap.h" + +#define SSS_DEFAULT_RX_BUF_SIZE_LEVEL ((u16)0xB) + +enum sss_rx_buf_size { + SSS_RX_BUF_SIZE_32B = 0x20, + SSS_RX_BUF_SIZE_64B = 0x40, + SSS_RX_BUF_SIZE_96B = 0x60, + SSS_RX_BUF_SIZE_128B = 0x80, + SSS_RX_BUF_SIZE_192B = 0xC0, + SSS_RX_BUF_SIZE_256B = 0x100, + SSS_RX_BUF_SIZE_384B = 0x180, + SSS_RX_BUF_SIZE_512B = 0x200, + SSS_RX_BUF_SIZE_768B = 0x300, + SSS_RX_BUF_SIZE_1K = 0x400, + SSS_RX_BUF_SIZE_1_5K = 0x600, + SSS_RX_BUF_SIZE_2K = 0x800, + SSS_RX_BUF_SIZE_3K = 0xC00, + SSS_RX_BUF_SIZE_4K = 0x1000, + SSS_RX_BUF_SIZE_8K = 0x2000, + SSS_RX_BUF_SIZE_16K = 0x4000, +}; + +const int sss_rx_buf_size_level[] = { + SSS_RX_BUF_SIZE_32B, + SSS_RX_BUF_SIZE_64B, + SSS_RX_BUF_SIZE_96B, + SSS_RX_BUF_SIZE_128B, + SSS_RX_BUF_SIZE_192B, + SSS_RX_BUF_SIZE_256B, + SSS_RX_BUF_SIZE_384B, + SSS_RX_BUF_SIZE_512B, + SSS_RX_BUF_SIZE_768B, + SSS_RX_BUF_SIZE_1K, + SSS_RX_BUF_SIZE_1_5K, + SSS_RX_BUF_SIZE_2K, + SSS_RX_BUF_SIZE_3K, + SSS_RX_BUF_SIZE_4K, + SSS_RX_BUF_SIZE_8K, + SSS_RX_BUF_SIZE_16K, +}; + +static u16 sss_get_rx_buf_size_level(int buf_size) +{ + u16 i; + u16 cnt = ARRAY_LEN(sss_rx_buf_size_level); + + for (i = 0; i < cnt; i++) { + if (sss_rx_buf_size_level[i] == buf_size) + return i; + } + + return SSS_DEFAULT_RX_BUF_SIZE_LEVEL; /* default 2K */ +} + +static int sss_chip_get_interrupt_cfg(void *hwdev, + struct sss_irq_cfg *intr_cfg, u16 channel) +{ + int ret; + struct sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + cmd_msix.opcode = SSS_MGMT_MSG_GET_CMD; + cmd_msix.func_id = sss_get_global_func_id(hwdev); + cmd_msix.msix_index = intr_cfg->msix_id; + + ret = sss_sync_send_msg_ch(hwdev, 
SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get intr config, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_msix.head.state, out_len, channel); + return -EINVAL; + } + + intr_cfg->lli_credit = cmd_msix.lli_credit_cnt; + intr_cfg->lli_timer = cmd_msix.lli_timer_cnt; + intr_cfg->pending = cmd_msix.pending_cnt; + intr_cfg->coalesc_timer = cmd_msix.coalesce_timer_cnt; + intr_cfg->resend_timer = cmd_msix.resend_timer_cnt; + + return 0; +} + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel) +{ + int ret; + struct sss_irq_cfg temp_cfg = {0}; + + if (!hwdev) + return -EINVAL; + + temp_cfg.msix_id = intr_cfg.msix_id; + + ret = sss_chip_get_interrupt_cfg(hwdev, &temp_cfg, channel); + if (ret != 0) + return -EINVAL; + + if (intr_cfg.lli_set == 0) { + intr_cfg.lli_credit = temp_cfg.lli_credit; + intr_cfg.lli_timer = temp_cfg.lli_timer; + } + + if (intr_cfg.coalesc_intr_set == 0) { + intr_cfg.pending = temp_cfg.pending; + intr_cfg.coalesc_timer = temp_cfg.coalesc_timer; + intr_cfg.resend_timer = temp_cfg.resend_timer; + } + + return sss_chip_set_eq_msix_attr(hwdev, &intr_cfg, channel); +} +EXPORT_SYMBOL(sss_chip_set_msix_attr); + +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en) +{ + u32 val; + + if (!hwdev) + return; + + val = SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID) | + SSS_SET_MSI_CLR_INDIR(!!clear_en, RESEND_TIMER_CLR); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_clear_msix_resend_bit); + +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel) +{ + int ret = 0; + struct sss_cmd_func_reset cmd_reset = {0}; + u16 out_len = sizeof(cmd_reset); + + if (!hwdev) + return -EINVAL; + + cmd_reset.func_id = func_id; + cmd_reset.reset_flag = flag; + sdk_info(SSS_TO_DEV(hwdev), "Func reset, flag: 0x%llx, channel:0x%x\n", flag, channel); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FUNC_RESET, + &cmd_reset, sizeof(cmd_reset), &cmd_reset, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_reset)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to reset func, flag 0x%llx, ret: %d, status: 0x%x, out_len: 0x%x\n", + flag, ret, cmd_reset.head.state, out_len); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_reset_function); + +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 channel) +{ + int ret; + struct sss_cmd_root_ctxt cmd_root = {0}; + u16 out_len = sizeof(cmd_root); + + if (!hwdev) + return -EINVAL; + + cmd_root.func_id = sss_get_global_func_id(hwdev); + if (rq_depth != 0 || sq_depth != 0 || rx_size != 0) { + cmd_root.rx_buf_sz = sss_get_rx_buf_size_level(rx_size); + cmd_root.rq_depth = (u16)ilog2(rq_depth); + cmd_root.sq_depth = (u16)ilog2(sq_depth); + cmd_root.lro_en = 1; + } + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, + &cmd_root, sizeof(cmd_root), &cmd_root, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_root)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set root ctx, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_root.head.state, out_len, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_root_ctx); + +int sss_chip_clean_root_ctx(void *hwdev, u16 channel) +{ + return sss_chip_set_root_ctx(hwdev, 0, 0, 0, channel); +} 
+EXPORT_SYMBOL(sss_chip_clean_root_ctx); + +static int sss_get_fw_ver(struct sss_hwdev *hwdev, + enum sss_fw_ver_type fw_type, u8 *buf, u8 buf_size, u16 channel) +{ + int ret; + struct sss_cmd_get_fw_version cmd_version = {0}; + u16 out_len = sizeof(cmd_version); + + if (!hwdev || !buf) + return -EINVAL; + + cmd_version.fw_type = fw_type; + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_GET_FW_VERSION, + &cmd_version, sizeof(cmd_version), &cmd_version, + &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_version)) { + sdk_err(hwdev->dev_hdl, + "Fail to get fw version, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_version.head.state, out_len, channel); + return -EIO; + } + + ret = snprintf(buf, buf_size, "%s", cmd_version.ver); + if (ret < 0) + return -EINVAL; + + return 0; +} + +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel) +{ + return sss_get_fw_ver(hwdev, SSS_FW_VER_TYPE_MPU, buf, + buf_size, channel); +} +EXPORT_SYMBOL(sss_get_mgmt_version); + +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel) +{ + int ret; + struct sss_cmd_func_svc_used_state cmd_state = {0}; + u16 out_len = sizeof(cmd_state); + + if (!hwdev) + return -EINVAL; + + cmd_state.func_id = sss_get_global_func_id(hwdev); + cmd_state.svc_type = service_type; + cmd_state.used_state = !!state; + + ret = sss_sync_send_msg_ch(hwdev, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + &cmd_state, sizeof(cmd_state), &cmd_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_state)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set func used state, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n\n", + ret, cmd_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_func_used_state); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability) +{ + struct sss_hwdev *dev = hwdev; + + if (!capability || !hwdev) + return false; + + if (SSS_IS_NIC_TYPE(dev)) { + memcpy(capability, SSS_TO_NIC_CAP(hwdev), sizeof(*capability)); + return true; + } else { + return false; + } +} +EXPORT_SYMBOL(sss_get_nic_capability); + +bool sss_support_nic(void *hwdev) +{ + return (hwdev && SSS_IS_NIC_TYPE((struct sss_hwdev *)hwdev)); +} +EXPORT_SYMBOL(sss_support_nic); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!SSS_IS_PPA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->mgmt_info->svc_cap.ppa_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(sss_support_ppa); + +u16 sss_get_max_sq_num(void *hwdev) +{ + if (!hwdev) { + pr_err("Get max sq num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_SQ_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_sq_num); + +u8 sss_get_phy_port_id(void *hwdev) +{ + if (!hwdev) { + pr_err("Get phy port id: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_PHY_PORT_ID(hwdev); +} +EXPORT_SYMBOL(sss_get_phy_port_id); + +u16 sss_get_max_vf_num(void *hwdev) +{ + if (!hwdev) { + pr_err("Get max vf num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_VF_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_vf_num); + +u16 sss_nic_intr_num(void *hwdev) +{ + struct sss_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sss_hwdev *)hwdev)->hwif; + + return hwif->attr.irq_num; +} +EXPORT_SYMBOL(sss_nic_intr_num); + +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap) +{ + if 
(!hwdev) { + pr_err("Get cos valid bitmap: hwdev is NULL\n"); + return -EINVAL; + } + + *func_cos_bitmap = SSS_TO_FUNC_COS_BITMAP(hwdev); + *port_cos_bitmap = SSS_TO_PORT_COS_BITMAP(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_cos_valid_bitmap); + +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, u16 alloc_num) +{ + int i; + int j; + u16 need_num = alloc_num; + u16 act_num = 0; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev || !alloc_array) + return 0; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + if (irq_info->free_num == 0) { + sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + return 0; + } + + if (alloc_num > irq_info->free_num) { + sdk_warn(SSS_TO_DEV(hwdev), "Adjust need_num to %u\n", irq_info->free_num); + need_num = irq_info->free_num; + } + + for (i = 0; i < need_num; i++) { + for (j = 0; j < irq_info->total_num; j++) { + if (irq[j].busy != SSS_CFG_FREE) + continue; + + if (irq_info->free_num == 0) { + sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + memset(alloc_array, 0, sizeof(*alloc_array) * alloc_num); + return 0; + } + + irq[j].type = service_type; + irq[j].busy = SSS_CFG_BUSY; + + alloc_array[i].irq_id = irq[j].desc.irq_id; + alloc_array[i].msix_id = irq[j].desc.msix_id; + irq_info->free_num--; + act_num++; + + break; + } + } + + mutex_unlock(&irq_info->irq_mutex); + return act_num; +} +EXPORT_SYMBOL(sss_alloc_irq); + +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id) +{ + int i; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev) + return; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < irq_info->total_num; i++) { + if (irq_id != irq[i].desc.irq_id || + service_type != irq[i].type) + continue; + + if (irq[i].busy == SSS_CFG_FREE) + continue; + + irq[i].busy = SSS_CFG_FREE; + irq_info->free_num++; + if (irq_info->free_num > irq_info->total_num) { + sdk_err(SSS_TO_DEV(hwdev), "Free_num out of range: [0, %u]\n", + irq_info->total_num); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + + if (i >= irq_info->total_num) + sdk_warn(SSS_TO_DEV(hwdev), "Irq %u does not need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(sss_free_irq); + +void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Register event: hwdev is NULL\n"); + return; + } + + dev->event_handler = callback; + dev->event_handler_data = data; +} +EXPORT_SYMBOL(sss_register_dev_event); + +void sss_unregister_dev_event(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Unregister event: hwdev is NULL\n"); + return; + } + + dev->event_handler = NULL; + dev->event_handler_data = NULL; +} +EXPORT_SYMBOL(sss_unregister_dev_event); + +int sss_get_dev_present_flag(const void *hwdev) +{ + return hwdev && !!((struct sss_hwdev *)hwdev)->chip_present_flag; +} +EXPORT_SYMBOL(sss_get_dev_present_flag); + +u8 sss_get_max_pf_num(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_MAX_PF_NUM((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_max_pf_num); + +int sss_get_chip_present_state(void *hwdev, bool *present_state) +{ + if (!hwdev || !present_state) + return 
-EINVAL; + + *present_state = sss_chip_get_present_state(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_chip_present_state); + +void sss_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(SSS_TO_DEV(hwdev), + "Fault event report, src: %u, level: %u\n", src, level); +} +EXPORT_SYMBOL(sss_fault_event_report); + +int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type, + void *service_adapter) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || service_type >= SSS_SERVICE_TYPE_MAX) + return -EINVAL; + + if (dev->service_adapter[service_type]) + return -EINVAL; + + dev->service_adapter[service_type] = service_adapter; + + return 0; +} +EXPORT_SYMBOL(sss_register_service_adapter); + +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return; + + dev->service_adapter[service_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_service_adapter); + +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return NULL; + + return dev->service_adapter[service_type]; +} +EXPORT_SYMBOL(sss_get_service_adapter); + +void sss_do_event_callback(void *hwdev, struct sss_event_info *event) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Event callback: hwdev is NULL\n"); + return; + } + + if (!dev->event_handler) { + sdk_info(dev->dev_hdl, "Event callback: handler is NULL\n"); + return; + } + + dev->event_handler(dev->event_handler_data, event); +} +EXPORT_SYMBOL(sss_do_event_callback); + +void sss_update_link_stats(void *hwdev, bool link_state) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (link_state) + atomic_inc(&dev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&dev->hw_stats.link_event_stats.link_down_stats); +} +EXPORT_SYMBOL(sss_update_link_stats); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c new file mode 100644 index 00000000000000..7942b93ed63f36 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_adapter.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_mgmt_info.h" +#include "sss_hwdev_mgmt_channel.h" +#include "sss_hwdev_cap.h" +#include "sss_hwdev_link.h" +#include "sss_hwdev_io_flush.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_hwif_mgmt_init.h" +#include "sss_hwdev_init.h" + +enum sss_host_mode { + SSS_HOST_MODE_NORMAL = 0, + SSS_HOST_MODE_VM, + SSS_HOST_MODE_BM, + SSS_HOST_MODE_MAX, +}; + +#define SSS_HWDEV_WQ_NAME "sssnic_hardware" +#define SSS_WQ_MAX_REQ 10 + +#define SSS_DETECT_PCIE_LINK_DOWN_RETRY 2 + +#define SSS_CHN_BUSY_TIMEOUT 25 + +#define SSS_HEARTBEAT_TIMER_EXPIRES 5000 +#define SSS_HEARTBEAT_PERIOD 1000 + +#define SSS_GET_PCIE_LINK_STATUS(hwdev) \ + ((hwdev)->heartbeat.pcie_link_down ? 
\ + SSS_EVENT_PCIE_LINK_DOWN : SSS_EVENT_HEART_LOST) + +#define SSS_SET_FUNC_HOST_MODE(hwdev, mode) \ +do { \ + if ((mode) >= SSS_FUNC_MOD_MIN && (mode) <= SSS_FUNC_MOD_MAX) { \ + (hwdev)->func_mode = (mode); \ + } else \ + (hwdev)->func_mode = SSS_FUNC_MOD_NORMAL_HOST; \ +} while (0) + +#define SSS_SYNFW_TIME_PERIOD (60 * 60 * 1000) +#define SSS_CHANNEL_DETECT_PERIOD (5 * 1000) + +#define SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) + +typedef void (*sss_set_mode_handler_t)(struct sss_hwdev *hwdev); + +static struct sss_hwdev *sss_alloc_hwdev(void) +{ + struct sss_hwdev *hwdev; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return NULL; + + hwdev->chip_fault_stats = vzalloc(SSS_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) { + kfree(hwdev); + return NULL; + } + + return hwdev; +} + +static void sss_free_hwdev(struct sss_hwdev *hwdev) +{ + vfree(hwdev->chip_fault_stats); + kfree(hwdev); +} + +static void sss_init_hwdev_param(struct sss_hwdev *hwdev, + struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + hwdev->adapter_hdl = adapter; + hwdev->pcidev_hdl = pdev; + hwdev->dev_hdl = &pdev->dev; + hwdev->chip_node = adapter->chip_node; + spin_lock_init(&hwdev->channel_lock); +} + +static void sss_set_chip_present_flag(struct sss_hwdev *hwdev, bool present) +{ + hwdev->chip_present_flag = !!present; +} + +static bool sss_is_chip_abnormal(struct sss_hwdev *hwdev) +{ + u32 pcie_status; + + if (!sss_get_dev_present_flag(hwdev)) + return false; + + pcie_status = sss_chip_get_pcie_link_status(hwdev); + if (pcie_status == SSS_PCIE_LINK_DOWN) { + hwdev->heartbeat.pcie_link_down_cnt++; + sdk_warn(hwdev->dev_hdl, "Pcie link down\n"); + if (hwdev->heartbeat.pcie_link_down_cnt >= SSS_DETECT_PCIE_LINK_DOWN_RETRY) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + hwdev->heartbeat.pcie_link_down = true; + return true; + } + + return false; + } + + if (pcie_status != SSS_PCIE_LINK_UP) { + hwdev->heartbeat.heartbeat_lost = true; + return true; + } + + hwdev->heartbeat.pcie_link_down_cnt = 0; + + return false; +} + +static void sss_update_aeq_stat(struct sss_hwdev *hwdev) +{ + if (hwdev->aeq_stat.last_recv_cnt != hwdev->aeq_stat.cur_recv_cnt) { + hwdev->aeq_stat.last_recv_cnt = hwdev->aeq_stat.cur_recv_cnt; + hwdev->aeq_stat.busy_cnt = 0; + } else { + hwdev->aeq_stat.busy_cnt++; + } +} + +static void sss_update_channel_status(struct sss_hwdev *hwdev) +{ + struct sss_card_node *node = hwdev->chip_node; + + if (!node) + return; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_PPF || + !SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) || + atomic_read(&node->channel_timeout_cnt)) + return; + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) { + sss_update_aeq_stat(hwdev); + + if (hwdev->aeq_stat.busy_cnt > SSS_CHN_BUSY_TIMEOUT) { + sdk_err(hwdev->dev_hdl, "Detect channel busy\n"); + atomic_inc(&node->channel_timeout_cnt); + } + } +} + +static void sss_heartbeat_timer_handler(struct timer_list *t) +{ + struct sss_hwdev *hwdev = from_timer(hwdev, t, heartbeat.heartbeat_timer); + + if (sss_is_chip_abnormal(hwdev)) { + queue_work(hwdev->workq, &hwdev->heartbeat.lost_work); + } else { + mod_timer(&hwdev->heartbeat.heartbeat_timer, + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_PERIOD)); + } + + sss_update_channel_status(hwdev); +} + +static void sss_heartbeat_lost_handler(struct work_struct *work) +{ + u16 fault_level; + u16 pcie_src; + struct sss_event_info event_info = {0}; + struct sss_hwdev 
*hwdev = container_of(work, struct sss_hwdev, + heartbeat.lost_work); + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_handler) { + event_info.type = SSS_GET_PCIE_LINK_STATUS(hwdev); + event_info.service = SSS_EVENT_SRV_COMM; + hwdev->event_handler(hwdev->event_handler_data, &event_info); + } + + if (hwdev->heartbeat.pcie_link_down) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + fault_level = SSS_FAULT_LEVEL_HOST; + pcie_src = SSS_FAULT_SRC_PCIE_LINK_DOWN; + } else { + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + sss_get_global_func_id(hwdev)); + fault_level = SSS_FAULT_LEVEL_FATAL; + pcie_src = SSS_FAULT_SRC_HOST_HEARTBEAT_LOST; + } + + sss_dump_chip_err_info(hwdev); +} + +static void sss_create_heartbeat_timer(struct sss_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat.heartbeat_timer, sss_heartbeat_timer_handler, 0); + hwdev->heartbeat.heartbeat_timer.expires = + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_TIMER_EXPIRES); + add_timer(&hwdev->heartbeat.heartbeat_timer); + + INIT_WORK(&hwdev->heartbeat.lost_work, sss_heartbeat_lost_handler); +} + +static void sss_destroy_heartbeat_timer(struct sss_hwdev *hwdev) +{ + destroy_work(&hwdev->heartbeat.lost_work); + del_timer_sync(&hwdev->heartbeat.heartbeat_timer); +} + +static void sss_set_bm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_SLAVE); +} + +static void sss_set_vm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_SLAVE); +} + +static void sss_set_normal_host_mode(struct sss_hwdev *hwdev) +{ + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); +} + +static int sss_enable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), true); + + return 0; +} + +static int sss_disable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); + + return 0; +} + +static int sss_init_host_mode(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + sss_set_mode_handler_t handler[SSS_HOST_MODE_MAX] = { + sss_set_normal_host_mode, + sss_set_vm_host_mode, + sss_set_bm_host_mode + }; + + if (SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + if (svc_cap->srv_multi_host_mode >= SSS_HOST_MODE_MAX) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + handler[svc_cap->srv_multi_host_mode](hwdev); + + ret = sss_enable_multi_host(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init function mode\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_host_mode(struct sss_hwdev *hwdev) +{ + sss_disable_multi_host(hwdev); +} + +static u64 sss_get_real_time(void) +{ + struct 
timeval val = {0}; + + do_gettimeofday(&val); + + return (u64)val.tv_sec * MSEC_PER_SEC + + (u64)val.tv_usec / USEC_PER_MSEC; +} + +static void sss_auto_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, sync_time_task); + int ret; + + ret = sss_chip_sync_time(hwdev, sss_get_real_time()); + if (ret != 0) + sdk_err(hwdev->dev_hdl, + "Fail to sync UTC time to firmware, errno: %d.\n", ret); + + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); +} + +static void sss_auto_channel_detect_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, channel_detect_task); + struct sss_card_node *chip_node = NULL; + + sss_chip_comm_channel_detect(hwdev); + + chip_node = hwdev->chip_node; + if (!atomic_read(&chip_node->channel_timeout_cnt)) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); +} + +static void sss_hwdev_init_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + INIT_DELAYED_WORK(&hwdev->sync_time_task, sss_auto_sync_time_work); + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + INIT_DELAYED_WORK(&hwdev->channel_detect_task, + sss_auto_channel_detect_work); + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); + } +} + +static void sss_hwdev_deinit_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + hwdev->features[0] &= ~(SSS_COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); +} + +int sss_init_hwdev(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev; + int ret; + + hwdev = sss_alloc_hwdev(); + if (!hwdev) + return -ENOMEM; + + sss_init_hwdev_param(hwdev, adapter); + adapter->hwdev = hwdev; + + ret = sss_hwif_init(adapter); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif\n"); + goto init_hwif_err; + } + + sss_set_chip_present_flag(hwdev, true); + + hwdev->workq = alloc_workqueue(SSS_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, SSS_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Fail to alloc hardware workq\n"); + goto alloc_workq_err; + } + + sss_create_heartbeat_timer(hwdev); + + ret = sss_init_mgmt_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt info\n"); + goto init_mgmt_info_err; + } + + ret = sss_init_mgmt_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt channel\n"); + goto init_mgmt_channel_err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + ret = sss_init_devlink(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init devlink\n"); + goto init_devlink_err; + } +#endif + + ret = sss_init_capability(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init capability\n"); + goto init_cap_err; + } + + ret = sss_init_host_mode(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init host mode\n"); + goto init_multi_host_fail; + } + + sss_hwdev_init_work(hwdev); + + ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if 
(ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + sss_hwdev_deinit_work(hwdev); + + sss_deinit_host_mode(hwdev); +init_multi_host_fail: + sss_deinit_capability(hwdev); + +init_cap_err: +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(hwdev); + +init_devlink_err: +#endif + sss_deinit_mgmt_channel(hwdev); + +init_mgmt_channel_err: + sss_deinit_mgmt_info(hwdev); + +init_mgmt_info_err: + sss_destroy_heartbeat_timer(hwdev); + destroy_workqueue(hwdev->workq); + +alloc_workq_err: + sss_hwif_deinit(hwdev); + +init_hwif_err: + sss_free_hwdev(hwdev); + adapter->hwdev = NULL; + + return -EFAULT; +} + +void sss_deinit_hwdev(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + u64 drv_features[SSS_MAX_FEATURE_QWORD] = {0}; + + sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + drv_features, SSS_MAX_FEATURE_QWORD); + + sss_hwdev_deinit_work(dev); + + if (SSS_IS_MULTI_HOST(dev)) + sss_disable_multi_host(dev); + + sss_hwdev_flush_io(dev, SSS_CHANNEL_COMM); + + sss_deinit_capability(dev); + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(dev); +#endif + + sss_deinit_mgmt_channel(dev); + + sss_deinit_mgmt_info(dev); + sss_destroy_heartbeat_timer(hwdev); + destroy_workqueue(dev->workq); + + sss_hwif_deinit(dev); + sss_free_hwdev(dev); +} + +void sss_hwdev_stop(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + sss_set_chip_present_flag(hwdev, false); + sdk_info(dev->dev_hdl, "Set card absent\n"); + sss_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +void sss_hwdev_detach(void *hwdev) +{ + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + } +} + +void sss_hwdev_shutdown(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (SSS_IS_SLAVE_HOST(dev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h new file mode 100644 index 00000000000000..43f35f29588c7f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_INIT_H +#define SSS_HWDEV_INIT_H + +#include "sss_adapter.h" + +int sss_init_hwdev(struct sss_pci_adapter *adapter); +void sss_deinit_hwdev(void *hwdev); +void sss_hwdev_detach(void *hwdev); +void sss_hwdev_stop(void *hwdev); +void sss_hwdev_shutdown(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c new file mode 100644 index 00000000000000..d4213d7bb09fbc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_hwif_api.h" +#include "sss_hwif_mbx.h" +#include "sss_common.h" +#include "sss_hwdev_io_flush.h" + +#define SSS_FLR_TIMEOUT 1000 +#define SSS_FLR_TIMEOUT_ONCE 10000 + +static enum sss_process_ret sss_check_flr_finish_handler(void *priv_data) +{ + struct sss_hwif *hwif = priv_data; + 
enum sss_pf_status status; + + status = sss_chip_get_pf_status(hwif); + if (status == SSS_PF_STATUS_FLR_FINISH_FLAG) { + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_ACTIVE_FLAG); + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_flr_finish(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_check_flr_finish_handler, + SSS_FLR_TIMEOUT, SSS_FLR_TIMEOUT_ONCE); +} + +static int sss_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size, channel); +} + +static int sss_chip_flush_doorbell(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_doorbell clear_db = {0}; + u16 out_len = sizeof(clear_db); + int ret; + + clear_db.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + &clear_db, sizeof(clear_db), + &clear_db, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &clear_db)) { + sdk_warn(hwdev->dev_hdl, + "Fail to flush doorbell, ret: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + ret, clear_db.head.state, out_len, channel); + if (ret == 0) + return -EFAULT; + } + + return ret; +} + +static int sss_chip_flush_resource(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_resource clr_res = {0}; + int ret; + + clr_res.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + ret = sss_msg_to_mgmt_no_ack(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_START_FLUSH, &clr_res, + sizeof(clr_res), channel); + if (ret != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to notice flush message, ret: %d, channel: 0x%x\n", + ret, channel); + } + + return ret; +} + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + int err; + int ret = 0; + + if (hwdev->chip_present_flag == 0) + return 0; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + msleep(100); + + err = sss_wait_ctrlq_stop(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to wait ctrlq stop\n"); + ret = err; + } + + sss_chip_disable_doorbell(hwif); + + err = sss_chip_flush_doorbell(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_FLR_START_FLAG); + else + msleep(100); + + err = sss_chip_flush_resource(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) { + err = sss_wait_for_flr_finish(hwif); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + } + + sss_chip_enable_doorbell(hwif); + + err = sss_reinit_ctrlq_ctx(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to reinit ctrlq ctx\n"); + ret = err; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h new file mode 100644 index 00000000000000..4b15cd0d23f6e3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_IO_FLUSH_H +#define SSS_HWDEV_IO_FLUSH_H + +#include "sss_hwdev.h" + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel); + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c new file mode 100644 index 00000000000000..7b51496e27a95a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_hwdev_link.h" +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#include "sss_hw_common.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" + +#define SSS_FW_MAGIC_NUM 0x5a5a1100 +#define SSS_FW_IMAGE_HEAD_SIZE 4096 +#define SSS_FW_FRAGMENT_MAX_LEN 1536 +#define SSS_FW_CFG_DEFAULT_INDEX 0xFF +#define SSS_FW_UPDATE_MGMT_TIMEOUT 3000000U +#define SSS_FW_TYPE_MAX_NUM 0x40 +#define SSS_FW_CFG_MAX_INDEX 8 +#define SSS_FW_CFG_MIN_INDEX 1 + +#ifndef DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT +#define DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT BIT(0) +#endif + +enum sss_devlink_param_id { + SSS_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + SSS_DEVLINK_PARAM_ID_SWITCH_CFG, +}; + +enum sss_firmware_type { + SSS_UP_FW_UPDATE_MIN_TYPE1 = 0x0, + SSS_UP_FW_UPDATE_UP_TEXT = 0x0, + SSS_UP_FW_UPDATE_UP_DATA = 0x1, + SSS_UP_FW_UPDATE_UP_DICT = 0x2, + SSS_UP_FW_UPDATE_TILE_PCPTR = 0x3, + SSS_UP_FW_UPDATE_TILE_TEXT = 0x4, + SSS_UP_FW_UPDATE_TILE_DATA = 0x5, + SSS_UP_FW_UPDATE_TILE_DICT = 0x6, + SSS_UP_FW_UPDATE_PPE_STATE = 0x7, + SSS_UP_FW_UPDATE_PPE_BRANCH = 0x8, + SSS_UP_FW_UPDATE_PPE_EXTACT = 0x9, + SSS_UP_FW_UPDATE_MAX_TYPE1 = 0x9, + SSS_UP_FW_UPDATE_CFG0 = 0xa, + SSS_UP_FW_UPDATE_CFG1 = 0xb, + SSS_UP_FW_UPDATE_CFG2 = 0xc, + SSS_UP_FW_UPDATE_CFG3 = 0xd, + SSS_UP_FW_UPDATE_MAX_TYPE1_CFG = 0xd, + + SSS_UP_FW_UPDATE_MIN_TYPE2 = 0x14, + SSS_UP_FW_UPDATE_MAX_TYPE2 = 0x14, + + SSS_UP_FW_UPDATE_MIN_TYPE3 = 0x18, + SSS_UP_FW_UPDATE_PHY = 0x18, + SSS_UP_FW_UPDATE_BIOS = 0x19, + SSS_UP_FW_UPDATE_HLINK_ONE = 0x1a, + SSS_UP_FW_UPDATE_HLINK_TWO = 0x1b, + SSS_UP_FW_UPDATE_HLINK_THR = 0x1c, + SSS_UP_FW_UPDATE_MAX_TYPE3 = 0x1c, + + SSS_UP_FW_UPDATE_MIN_TYPE4 = 0x20, + SSS_UP_FW_UPDATE_L0FW = 0x20, + SSS_UP_FW_UPDATE_L1FW = 0x21, + SSS_UP_FW_UPDATE_BOOT = 0x22, + SSS_UP_FW_UPDATE_SEC_DICT = 0x23, + SSS_UP_FW_UPDATE_HOT_PATCH0 = 0x24, + SSS_UP_FW_UPDATE_HOT_PATCH1 = 0x25, + SSS_UP_FW_UPDATE_HOT_PATCH2 = 0x26, + SSS_UP_FW_UPDATE_HOT_PATCH3 = 0x27, + SSS_UP_FW_UPDATE_HOT_PATCH4 = 0x28, + SSS_UP_FW_UPDATE_HOT_PATCH5 = 0x29, + SSS_UP_FW_UPDATE_HOT_PATCH6 = 0x2a, + SSS_UP_FW_UPDATE_HOT_PATCH7 = 0x2b, + SSS_UP_FW_UPDATE_HOT_PATCH8 = 0x2c, + SSS_UP_FW_UPDATE_HOT_PATCH9 = 0x2d, + SSS_UP_FW_UPDATE_HOT_PATCH10 = 0x2e, + SSS_UP_FW_UPDATE_HOT_PATCH11 = 0x2f, + SSS_UP_FW_UPDATE_HOT_PATCH12 = 0x30, + SSS_UP_FW_UPDATE_HOT_PATCH13 = 0x31, + SSS_UP_FW_UPDATE_HOT_PATCH14 = 0x32, + SSS_UP_FW_UPDATE_HOT_PATCH15 = 0x33, + SSS_UP_FW_UPDATE_HOT_PATCH16 = 0x34, + SSS_UP_FW_UPDATE_HOT_PATCH17 = 0x35, + SSS_UP_FW_UPDATE_HOT_PATCH18 = 0x36, + SSS_UP_FW_UPDATE_HOT_PATCH19 = 0x37, + SSS_UP_FW_UPDATE_MAX_TYPE4 = 0x37, + + SSS_UP_FW_UPDATE_MIN_TYPE5 = 0x3a, + SSS_UP_FW_UPDATE_OPTION_ROM = 0x3a, + SSS_UP_FW_UPDATE_MAX_TYPE5 = 0x3a, + + SSS_UP_FW_UPDATE_MIN_TYPE6 = 0x3e, + SSS_UP_FW_UPDATE_MAX_TYPE6 = 0x3e, + + SSS_UP_FW_UPDATE_MIN_TYPE7 = 0x40, + SSS_UP_FW_UPDATE_MAX_TYPE7 = 0x40, +}; + +#define SSS_IMAGE_MPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_UP_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DICT)) + +#define 
SSS_IMAGE_NPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_TILE_PCPTR) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DICT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_STATE) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_BRANCH) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_EXTACT)) + +#define SSS_IMAGE_COLD_ALL_IN (SSS_IMAGE_MPU_ALL_IN | SSS_IMAGE_NPU_ALL_IN) + +#define SSS_IMAGE_CFG_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_CFG0) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG1) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG2) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG3)) + +#define SSS_CHECK_IMAGE_INTEGRATY(mask) \ + (((mask) & SSS_IMAGE_COLD_ALL_IN) == SSS_IMAGE_COLD_ALL_IN && \ + ((mask) & SSS_IMAGE_CFG_ALL_IN) != 0) + +#define SSS_LINK_HWDEV(link) \ + ((struct sss_hwdev *)((struct sss_devlink *)devlink_priv(link))->hwdev) + +struct sss_firmware_section { + u32 section_len; + u32 section_offset; + u32 section_version; + u32 section_type; + u32 section_crc; + u32 section_flag; +}; + +struct sss_firmware_image { + u32 fw_version; + u32 fw_len; + u32 fw_magic; + struct { + u32 section_cnt : 16; + u32 rsvd : 16; + } fw_info; + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + u32 device_id; + u32 rsvd0[101]; + u32 rsvd1[534]; + u32 bin_data; +}; + +struct sss_host_image { + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + struct { + u32 total_len; + u32 fw_version; + } image_info; + u32 section_cnt; + u32 device_id; +}; + +struct sss_cmd_update_firmware { + struct sss_mgmt_msg_head head; + + struct { + u32 sl : 1; + u32 sf : 1; + u32 flag : 1; + u32 bit_signed : 1; + u32 reserved : 12; + u32 fragment_len : 16; + } ctl_info; + + struct { + u32 section_crc; + u32 section_type; + } section_info; + + u32 total_len; + u32 section_len; + u32 section_version; + u32 section_offset; + u32 data[384]; +}; + +struct sss_cmd_activate_firmware { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +struct sss_cmd_switch_config { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +static bool sss_check_image_valid(struct sss_hwdev *hwdev, + struct sss_firmware_image *image, u32 image_size) +{ + u32 i; + u32 length = 0; + u32 cnt; + + if (image->fw_magic != SSS_FW_MAGIC_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw magic: 0x%x read from file\n", image->fw_magic); + return false; + } + + cnt = image->fw_info.section_cnt; + if (cnt > SSS_FW_TYPE_MAX_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw type num: 0x%x read from file\n", cnt); + return false; + } + + for (i = 0; i < cnt; i++) + length += image->section_info[i].section_len; + + if (length != image->fw_len || + (u32)(image->fw_len + SSS_FW_IMAGE_HEAD_SIZE) != image_size) { + sdk_err(hwdev->dev_hdl, "Err data size: 0x%x read from file\n", length); + return false; + } + + return true; +} + +static void sss_init_host_image(struct sss_host_image *host_image, + struct sss_firmware_image *image) +{ + int i; + + for (i = 0; i < image->fw_info.section_cnt; i++) { + memcpy(&host_image->section_info[i], &image->section_info[i], + sizeof(image->section_info[i])); + } + + host_image->image_info.fw_version = image->fw_version; + host_image->section_cnt = image->fw_info.section_cnt; + host_image->device_id = image->device_id; + host_image->image_info.total_len = image->fw_len; +} + +static bool sss_check_image_integrity(struct sss_hwdev *hwdev, + struct sss_host_image *host_image) +{ + u32 i; + u32 section_type; + u64 mask = 0; + + for (i = 0; i < host_image->section_cnt; i++) { + section_type = 
host_image->section_info[i].section_type; + if (mask & (1ULL << section_type)) { + sdk_err(hwdev->dev_hdl, "Duplicate section type: %u\n", section_type); + return false; + } + mask |= (1ULL << section_type); + } + + if (SSS_CHECK_IMAGE_INTEGRATY(mask)) + return true; + + sdk_err(hwdev->dev_hdl, + "Fail to check file integrity, valid: 0x%llx, current: 0x%llx\n", + (SSS_IMAGE_COLD_ALL_IN | SSS_IMAGE_CFG_ALL_IN), mask); + + return false; +} + +static bool sss_check_image_device_id(struct sss_hwdev *hwdev, u32 dev_id) +{ + struct sss_cmd_board_info info = {0}; + + if (sss_chip_get_board_info(hwdev, &info.info) != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get board info\n"); + return false; + } + + if (dev_id == info.info.board_type) + return true; + + sdk_err(hwdev->dev_hdl, + "The image device type: 0x%x don't match the fw dev id: 0x%x\n", + dev_id, info.info.board_type); + + return false; +} + +static void sss_init_update_cmd_param(struct sss_cmd_update_firmware *cmd_update, + struct sss_firmware_section *info, int remain, + u32 send_offset) +{ + cmd_update->ctl_info.sl = (remain <= SSS_FW_FRAGMENT_MAX_LEN) ? true : false; + cmd_update->ctl_info.sf = (remain == info->section_len) ? true : false; + cmd_update->ctl_info.bit_signed = info->section_flag & 0x1; + cmd_update->ctl_info.fragment_len = min(remain, SSS_FW_FRAGMENT_MAX_LEN); + + cmd_update->section_info.section_crc = info->section_crc; + cmd_update->section_info.section_type = info->section_type; + + cmd_update->section_version = info->section_version; + cmd_update->section_len = info->section_len; + cmd_update->section_offset = send_offset; +} + +static int sss_chip_update_firmware(struct sss_hwdev *hwdev, + struct sss_cmd_update_firmware *cmd_update) +{ + int ret; + u16 out_len = sizeof(*cmd_update); + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_UPDATE_FW, cmd_update, sizeof(*cmd_update), + cmd_update, &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_update)) { + sdk_err(hwdev->dev_hdl, + "Fail to update fw, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_update->head.state, out_len); + return (cmd_update->head.state != 0) ? 
+ cmd_update->head.state : -EIO; + } + + return 0; +} + +static int sss_update_firmware(struct sss_hwdev *hwdev, const u8 *data, + struct sss_host_image *host_image) +{ + int ret; + int remain; + u32 i; + u32 send_offset; + u32 offset; + bool flag = false; + struct sss_cmd_update_firmware *cmd_update = NULL; + + cmd_update = kzalloc(sizeof(*cmd_update), GFP_KERNEL); + if (!cmd_update) + return -ENOMEM; + + for (i = 0; i < host_image->section_cnt; i++) { + offset = host_image->section_info[i].section_offset; + remain = (int)(host_image->section_info[i].section_len); + send_offset = 0; + + while (remain > 0) { + if (flag) { + cmd_update->total_len = 0; + } else { + cmd_update->total_len = host_image->image_info.total_len; + flag = true; + } + + sss_init_update_cmd_param(cmd_update, &host_image->section_info[i], + remain, send_offset); + + memcpy(cmd_update->data, + ((data + SSS_FW_IMAGE_HEAD_SIZE) + offset) + send_offset, + cmd_update->ctl_info.fragment_len); + + ret = sss_chip_update_firmware(hwdev, cmd_update); + if (ret != 0) { + kfree(cmd_update); + return ret; + } + + send_offset += cmd_update->ctl_info.fragment_len; + remain = (int)(host_image->section_info[i].section_len - send_offset); + } + } + + kfree(cmd_update); + + return 0; +} + +static int sss_flash_update_notify(struct devlink *devlink, + const struct firmware *fw, struct sss_host_image *image, + struct netlink_ext_ack *extack) +{ + struct sss_devlink *devlink_dev = devlink_priv(devlink); + struct sss_hwdev *hwdev = devlink_dev->hwdev; + int ret; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_begin_notify(devlink); +#endif + devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0); + sdk_info(hwdev->dev_hdl, "Flash firmware begin\n"); + ret = sss_update_firmware(hwdev, fw->data, image); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to flash firmware, ret: %d\n", ret); + NL_SET_ERR_MSG_MOD(extack, "Fail to flash firmware"); + devlink_flash_update_status_notify(devlink, "Fail to flash firmware", NULL, 0, 0); + } else { + sdk_info(hwdev->dev_hdl, "Flash firmware end\n"); + devlink_flash_update_status_notify(devlink, "Flash firmware end", NULL, 0, 0); + } +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_end_notify(devlink); +#endif + + return ret; +} + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM +static int sss_devlink_flash_update(struct devlink *link, const char *file_name, + const char *component, struct netlink_ext_ack *extack) +#else +static int sss_devlink_flash_update(struct devlink *link, + struct devlink_flash_update_params *param, + struct netlink_ext_ack *extack) +#endif +{ + int ret; + struct sss_host_image *host_image = NULL; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + const struct firmware *fw = NULL; +#else + const struct firmware *fw = param->fw; +#endif + + host_image = kzalloc(sizeof(*host_image), GFP_KERNEL); + if (!host_image) { + ret = -ENOMEM; + goto alloc_host_image_err; + } + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + ret = request_firmware_direct(&fw, file_name, hwdev->dev_hdl); +#else +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + ret = request_firmware_direct(&fw, param->file_name, hwdev->dev_hdl); +#else + ret = 0; +#endif +#endif + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request firmware\n"); + goto request_fw_err; + } + + if (!sss_check_image_valid(hwdev, (struct sss_firmware_image *)fw->data, + (u32)(fw->size))) { + sdk_err(hwdev->dev_hdl, 
"Fail to check image valid\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image valid"); + ret = -EINVAL; + goto check_image_err; + } + + sss_init_host_image(host_image, (struct sss_firmware_image *)fw->data); + + if (!sss_check_image_integrity(hwdev, host_image)) { + sdk_err(hwdev->dev_hdl, "Fail to check image integrity\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image integrity"); + ret = -EINVAL; + goto check_image_err; + } + + if (!sss_check_image_device_id(hwdev, host_image->device_id)) { + sdk_err(hwdev->dev_hdl, "Fail to check image device id\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image device id"); + ret = -EINVAL; + goto check_image_err; + } + + ret = sss_flash_update_notify(link, fw, host_image, extack); + +check_image_err: +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + release_firmware(fw); +#endif + +request_fw_err: + kfree(host_image); + +alloc_host_image_err: + link_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + link_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + + return ret; +} + +static const struct devlink_ops g_devlink_ops = { +#ifdef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS + .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT, +#endif + .flash_update = sss_devlink_flash_update, +}; + +static int sss_chip_activate_firmware(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_activate_firmware cmd_activate = {0}; + u16 out_len = sizeof(cmd_activate); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF && + SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_activate.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_ACTIVE_FW, + &cmd_activate, sizeof(cmd_activate), &cmd_activate, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_activate)) { + sdk_err(hwdev->dev_hdl, + "Fail to activate firmware, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_activate.head.state, out_len); + return (cmd_activate.head.state != 0) ? 
+ cmd_activate.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->active_cfg_id; + + return 0; +} + +static int sss_devlink_set_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->active_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin activate firmware\n"); + + ret = sss_chip_activate_firmware(hwdev, link_dev->active_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to activate firmware, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End activate firmware\n"); + + return 0; +} + +static int sss_chip_switch_config(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_switch_config cmd_switch = {0}; + u16 out_len = sizeof(cmd_switch); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_switch.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_SWITCH_CFG, + &cmd_switch, sizeof(cmd_switch), &cmd_switch, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_switch)) { + sdk_err(hwdev->dev_hdl, + "Fail to switch cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_switch.head.state, out_len); + return (cmd_switch.head.state != 0) ? + cmd_switch.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->switch_cfg_id; + + return 0; +} + +static int sss_devlink_set_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->switch_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin switch cfg"); + + ret = sss_chip_switch_config(hwdev, link_dev->switch_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to switch cfg, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End Switch cfg\n"); + + return 0; +} + +static int sss_devlink_validate_firmware_config(struct devlink *link, u32 id, + union devlink_param_value param_val, + struct netlink_ext_ack *ext_ack) +{ + struct sss_hwdev *hwdev = SSS_LINK_HWDEV(link); + + if (param_val.vu8 < SSS_FW_CFG_MIN_INDEX || + param_val.vu8 > SSS_FW_CFG_MAX_INDEX) { + sdk_err(hwdev->dev_hdl, "Firmware cfg id out of range [1,8]\n"); + NL_SET_ERR_MSG_MOD(ext_ack, "Firmware cfg id out of range [1,8]\n"); + return -ERANGE; + } + + return 0; +} + +static const struct devlink_param g_devlink_param[] = { + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + "activate_fw", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_activate_fw_config, + sss_devlink_set_activate_fw_config, + sss_devlink_validate_firmware_config), + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_SWITCH_CFG, + "switch_cfg", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_switch_config, + sss_devlink_set_switch_config, + sss_devlink_validate_firmware_config), +}; + +int 
sss_init_devlink(struct sss_hwdev *hwdev)
+{
+	int ret;
+	struct devlink *link = NULL;
+	struct pci_dev *pdev = hwdev->pcidev_hdl;
+
+#ifdef HAS_DEVLINK_ALLOC_SETS_DEV
+	link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink), &pdev->dev);
+#else
+	link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink));
+#endif
+	if (!link) {
+		sdk_err(hwdev->dev_hdl, "Fail to alloc devlink\n");
+		return -ENOMEM;
+	}
+
+	hwdev->devlink_dev = devlink_priv(link);
+	hwdev->devlink_dev->hwdev = hwdev;
+	hwdev->devlink_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
+	hwdev->devlink_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
+
+#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
+	ret = devlink_params_register(link, g_devlink_param,
+				      ARRAY_SIZE(g_devlink_param));
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
+		goto register_err;
+	}
+#endif
+
+#ifdef NO_DEVLINK_REGISTER_SETS_DEV
+#ifdef DEVLINK_REGISTER_RETURN_VOID
+	devlink_register(link);
+	ret = 0;
+#else
+	ret = devlink_register(link);
+#endif
+
+#else
+	ret = devlink_register(link, &pdev->dev);
+#endif
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink\n");
+#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
+		devlink_params_unregister(link, g_devlink_param,
+					  ARRAY_SIZE(g_devlink_param));
+#endif
+		goto register_err;
+	}
+
+#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
+	ret = devlink_params_register(link, g_devlink_param,
+				      ARRAY_SIZE(g_devlink_param));
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
+		goto register_param_err;
+	}
+#endif
+	devlink_params_publish(link);
+
+	return 0;
+
+#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
+register_param_err:
+	devlink_unregister(link);
+#endif
+
+register_err:
+	devlink_free(link);
+
+	return -EFAULT;
+}
+
+void sss_deinit_devlink(struct sss_hwdev *hwdev)
+{
+	struct devlink *link = priv_to_devlink(hwdev->devlink_dev);
+
+	devlink_params_unpublish(link);
+	devlink_params_unregister(link, g_devlink_param,
+				  ARRAY_SIZE(g_devlink_param));
+	devlink_unregister(link);
+	devlink_free(link);
+}
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h
new file mode 100644
index 00000000000000..32714685d1612d
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#ifndef SSS_HWDEV_LINK_H
+#define SSS_HWDEV_LINK_H
+
+#include "sss_kernel.h"
+#include "sss_hwdev.h"
+#include "sss_hw_mbx_msg.h"
+
+int sss_init_devlink(struct sss_hwdev *hwdev);
+void sss_deinit_devlink(struct sss_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c
new file mode 100644
index 00000000000000..42f0c1fa15abb0
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
+
+#include
+#include
+
+#include "sss_kernel.h"
+#include "sss_hw.h"
+#include "sss_hwdev.h"
+#include "sss_hwdev_api.h"
+#include "sss_hwdev_mgmt_channel.h"
+#include "sss_hwif_mbx.h"
+#include "sss_hwif_mbx_init.h"
+#include "sss_hwif_aeq.h"
+#include "sss_hwif_export.h"
+#include "sss_hwif_api.h"
+#include "sss_hwif_adm_init.h"
+#include "sss_hwif_mgmt_init.h"
+#include
"sss_hwif_ctrlq_init.h" +#include "sss_csr.h" + +#define SSS_DRV_FEATURE_DEF \ + (SSS_COMM_F_ADM | SSS_COMM_F_CLP | SSS_COMM_F_MBX_SEGMENT | \ + SSS_COMM_F_CTRLQ_NUM | SSS_COMM_F_VIRTIO_VQ_SIZE) + +#define SSS_COMM_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +#define SSS_DMA_ATTR_INDIR_ID_SHIFT 0 +#define SSS_DMA_ATTR_INDIR_ID_MASK 0x3FF + +#define SSS_SET_DMA_ATTR_INDIR_ID(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_INDIR_##member##_MASK) << \ + SSS_DMA_ATTR_INDIR_##member##_SHIFT) + +#define SSS_CLEAR_DMA_ATTR_INDIR_ID(val, member) \ + ((val) & (~(SSS_DMA_ATTR_INDIR_##member##_MASK \ + << SSS_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define SSS_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define SSS_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define SSS_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define SSS_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define SSS_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define SSS_SET_DMA_ATTR_ENTRY(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_ENTRY_##member##_MASK) << \ + SSS_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define SSS_PCIE_ST_DISABLE 0 +#define SSS_PCIE_AT_DISABLE 0 +#define SSS_PCIE_PH_DISABLE 0 + +#define SSS_PCIE_MSIX_ATTR_ENTRY 0 + +#define SSS_PCIE_SNOOP 0 +#define SSS_PCIE_NO_SNOOP 1 + +#define SSS_PCIE_TPH_DISABLE 0 +#define SSS_PCIE_TPH_ENABLE 1 + +#define SSS_FAULT_LEVEL_STR_FATAL "fatal" +#define SSS_FAULT_LEVEL_STR_RESET "reset" +#define SSS_FAULT_LEVEL_STR_HOST "host" +#define SSS_FAULT_LEVEL_STR_FLR "flr" +#define SSS_FAULT_LEVEL_STR_GENERAL "general" +#define SSS_FAULT_LEVEL_STR_SUGGESTION "suggestion" +#define SSS_FAULT_LEVEL_STR_UNKNOWN "Unknown" + +#define SSS_FAULT_TYPE_STR_CHIP "chip" +#define SSS_FAULT_TYPE_STR_NPU "ucode" +#define SSS_FAULT_TYPE_STR_MEM_RD "mem rd timeout" +#define SSS_FAULT_TYPE_STR_MEM_WR "mem wr timeout" +#define SSS_FAULT_TYPE_STR_REG_RD "reg rd timeout" +#define SSS_FAULT_TYPE_STR_REG_WR "reg wr timeout" +#define SSS_FAULT_TYPE_STR_PHY "phy fault" +#define SSS_FAULT_TYPE_STR_TSENSOR "tsensor fault" +#define SSS_FAULT_TYPE_STR_UNKNOWN "Unknown" + +#define SSS_COMM_RESET_TYPE \ + ((1 << SSS_RESET_TYPE_COMM) | (1 << SSS_RESET_TYPE_COMM_CMD_CH) | \ + (1 << SSS_RESET_TYPE_FLUSH_BIT) | (1 << SSS_RESET_TYPE_MQM) | \ + (1 << SSS_RESET_TYPE_SMF) | (1 << SSS_RESET_TYPE_PF_BW_CFG)) + +#define SSS_FOUR_REG_LEN 16 + +#define SSS_X_CSR_INDEX 30 +#define SSS_DUMP_16B_PER_LINE 16 +#define SSS_DUMP_4_VAR_PER_LINE 4 + +typedef void (*sss_print_err_handler_t)(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event); + +typedef void (*sss_mgmt_event_handler_t)(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +struct sss_mgmt_event { + u16 event_type; + sss_mgmt_event_handler_t handler; +}; + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static void sss_show_watchdog_mgmt_register_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u64 *reg = NULL; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n", + watchdog_info->cur_time_h, watchdog_info->cur_time_l, + watchdog_info->task_id, watchdog_info->sp); + + sdk_err(hwdev->dev_hdl, + "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n", + watchdog_info->cur_used, watchdog_info->peak_used, + 
watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", + watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far, + watchdog_info->esr, watchdog_info->xzr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + reg = &watchdog_info->x30; + for (i = 0; i <= SSS_X_CSR_INDEX; i++) + sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", + SSS_X_CSR_INDEX - i, reg[i]); +} + +static void sss_show_watchdog_stack_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u32 j; + u32 tmp; + u32 stack_len; + u32 *dump_addr = NULL; + + if (watchdog_info->stack_actlen <= SSS_STACK_DATA_LEN) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = SSS_STACK_DATA_LEN; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / SSS_DUMP_16B_PER_LINE); i++) { + dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * SSS_DUMP_16B_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3)); + } + + tmp = (stack_len % SSS_DUMP_16B_PER_LINE) / SSS_DUMP_4_VAR_PER_LINE; + for (j = 0; j < tmp; j++) { + dump_addr = (u32 *)(watchdog_info->stack_data + + (u32)(i * SSS_DUMP_16B_PER_LINE + j * SSS_DUMP_4_VAR_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } +} + +static void sss_show_watchdog_timeout_info(struct sss_hwdev *hwdev, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct sss_watchdog_info *watchdog_info = buf_in; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + sss_show_watchdog_mgmt_register_info(hwdev, watchdog_info); + sss_show_watchdog_stack_info(hwdev, watchdog_info); + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->head.state = 0; +} + +static void sss_watchdog_timeout_event_handler(void *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct sss_event_info event_info = {0}; + struct sss_hwdev *dev = hwdev; + + sss_show_watchdog_timeout_info(dev, buf_in, in_size, buf_out, out_size); + + if (dev->event_handler) { + event_info.type = SSS_EVENT_MGMT_WATCHDOG; + dev->event_handler(dev->event_handler_data, &event_info); + } +} + +static void sss_show_exc_info(struct sss_hwdev *hwdev, struct sss_exc_info *exc_info) +{ + u32 i; + + /* key information */ + sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n"); + sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n", + exc_info->cpu_tick.tick_cnt_h, exc_info->cpu_tick.tick_cnt_l); + sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause); + sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver); + sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver); + sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type); + sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id); + sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type); + sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id); + sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order); + 
sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt);
+	sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno);
+	sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", exc_info->uw_sp);
+	sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom);
+
+	/* register field */
+	sdk_err(hwdev->dev_hdl, "Register contents when the exception occurred.\n");
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0",
+		exc_info->reg_info.ttbr0, "TTBR1", exc_info->reg_info.ttbr1);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR",
+		exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR",
+		exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTE1",
+		exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR",
+		exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR",
+		exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr);
+
+	for (i = 0; i < SSS_XREGS_NUM - 1; i += 0x2)
+		sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx",
+			i, " ", exc_info->reg_info.xregs[i],
+			(u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]);
+
+	sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", SSS_XREGS_NUM - 1, " ",
+		exc_info->reg_info.xregs[SSS_XREGS_NUM - 1]);
+}
+
+static void sss_lastword_report_event_handler(void *hwdev,
+					      void *buf_in, u16 in_size,
+					      void *buf_out, u16 *out_size)
+{
+	struct sss_lastword_info *lastword_info = buf_in;
+	struct sss_exc_info *exc_info = &lastword_info->stack_info;
+	u32 stack_len = lastword_info->stack_actlen;
+	struct sss_hwdev *dev = hwdev;
+	u32 *curr_reg = NULL;
+	u32 reg_i;
+	u32 cnt;
+
+	if (in_size != sizeof(*lastword_info)) {
+		sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %ld\n",
+			in_size, sizeof(*lastword_info));
+		return;
+	}
+
+	sss_show_exc_info(dev, exc_info);
+
+	/* call stack dump */
+	sdk_err(dev->dev_hdl, "Dump stack when the exception occurs, 16 bytes per line.\n");
+
+	cnt = stack_len / SSS_FOUR_REG_LEN;
+	for (reg_i = 0; reg_i < cnt; reg_i++) {
+		curr_reg = (u32 *)(lastword_info->stack_data +
+				   ((u64)(u32)(reg_i * SSS_FOUR_REG_LEN)));
+		sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			*curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3));
+	}
+
+	sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n");
+}
+
+const struct sss_mgmt_event g_mgmt_event_handler[] = {
+	{
+		.event_type = SSS_COMM_MGMT_CMD_FAULT_REPORT,
+		.handler = sss_fault_event_handler,
+	},
+
+	{
+		.event_type = SSS_COMM_MGMT_CMD_WATCHDOG_INFO,
+		.handler = sss_watchdog_timeout_event_handler,
+	},
+
+	{
+		.event_type = SSS_COMM_MGMT_CMD_LASTWORD_GET,
+		.handler = sss_lastword_report_event_handler,
+	},
+};
+
+static void sss_print_chip_fault(struct sss_hwdev *hwdev,
+				 struct sss_fault_event *fault_event)
+{
+	u8 err_level;
+	char *level_str = NULL;
+	char *fault_level[SSS_FAULT_LEVEL_MAX] = {
+		SSS_FAULT_LEVEL_STR_FATAL, SSS_FAULT_LEVEL_STR_RESET,
+		SSS_FAULT_LEVEL_STR_HOST, SSS_FAULT_LEVEL_STR_FLR,
+		SSS_FAULT_LEVEL_STR_GENERAL, SSS_FAULT_LEVEL_STR_SUGGESTION
+	};
+
+	err_level =
fault_event->info.chip.err_level; + if (err_level < SSS_FAULT_LEVEL_MAX) + level_str = fault_level[err_level]; + else + level_str = SSS_FAULT_LEVEL_STR_UNKNOWN; + + if (err_level == SSS_FAULT_LEVEL_SERIOUS_FLR) + sdk_err(hwdev->dev_hdl, "Err_level: %u [%s], func_id: %u\n", + err_level, level_str, fault_event->info.chip.func_id); + + sdk_err(hwdev->dev_hdl, "Node_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + fault_event->info.chip.node_id, fault_event->info.chip.err_type, + err_level, level_str, + fault_event->info.chip.err_csr_addr, fault_event->info.chip.err_csr_value); +} + +static void sss_print_ucode_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + fault_event->info.ucode.cause_id, fault_event->info.ucode.core_id, + fault_event->info.ucode.c_id, fault_event->info.ucode.epc); +} + +static void sss_print_mem_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_id: 0x%08x\n", + fault_event->info.mem_timeout.err_csr_ctrl, + fault_event->info.mem_timeout.err_csr_data, + fault_event->info.mem_timeout.ctrl_tab, fault_event->info.mem_timeout.mem_id); +} + +static void sss_print_reg_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", fault_event->info.reg_timeout.err_csr); +} + +static void sss_print_phy_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + fault_event->info.phy_fault.op_type, fault_event->info.phy_fault.port_id, + fault_event->info.phy_fault.dev_ad, fault_event->info.phy_fault.csr_addr, + fault_event->info.phy_fault.op_data); +} + +static void sss_print_fault_info(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + struct sss_fault_event_stats *event_stats = &hwdev->hw_stats.fault_event_stats; + char *type = NULL; + char *fault_type[SSS_FAULT_TYPE_MAX] = { + SSS_FAULT_TYPE_STR_CHIP, SSS_FAULT_TYPE_STR_NPU, + SSS_FAULT_TYPE_STR_MEM_RD, SSS_FAULT_TYPE_STR_MEM_WR, + SSS_FAULT_TYPE_STR_REG_RD, SSS_FAULT_TYPE_STR_REG_WR, + SSS_FAULT_TYPE_STR_PHY, SSS_FAULT_TYPE_STR_TSENSOR + }; + sss_print_err_handler_t print_handler[] = { + sss_print_chip_fault, sss_print_ucode_err, + sss_print_mem_rw_err, sss_print_mem_rw_err, + sss_print_reg_rw_err, sss_print_reg_rw_err, + sss_print_phy_err + }; + + if (fault_event->type < SSS_FAULT_TYPE_MAX) { + type = fault_type[fault_event->type]; + atomic_inc(&event_stats->fault_type_stat[fault_event->type]); + } else { + type = SSS_FAULT_TYPE_STR_UNKNOWN; + } + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + sss_get_global_func_id(hwdev)); + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", fault_event->type, type); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + fault_event->info.val[0x0], fault_event->info.val[0x1], + fault_event->info.val[0x2], fault_event->info.val[0x3]); + + sss_dump_chip_err_info(hwdev); + + if (fault_event->type >= ARRAY_LEN(print_handler)) + return; + + print_handler[fault_event->type](hwdev, fault_event); +} + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_hwdev *hwdev = data; + struct sss_cmd_fault_event 
*cmd_event = in_buf; + struct sss_event_info info; + struct sss_fault_event *fault_event = (void *)info.event_data; + + if (in_size != sizeof(*cmd_event)) { + sdk_err(hwdev->dev_hdl, "Invalid size: %u.\n", in_size); + return; + } + + sss_print_fault_info(hwdev, &cmd_event->fault_event); + + if (hwdev->event_handler) { + info.type = SSS_EVENT_FAULT; + info.service = SSS_EVENT_SRV_COMM; + memcpy(info.event_data, &cmd_event->fault_event, sizeof(cmd_event->fault_event)); + fault_event->fault_level = (cmd_event->fault_event.type == SSS_FAULT_TYPE_CHIP) ? + cmd_event->fault_event.info.chip.err_level : + SSS_FAULT_LEVEL_FATAL; + hwdev->event_handler(hwdev->event_handler_data, &info); + } +} + +static void sss_pf_handle_mgmt_event(void *data, u16 event_type, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + u32 i; + u32 num = ARRAY_LEN(g_mgmt_event_handler); + + for (i = 0; i < num; i++) { + if (event_type == g_mgmt_event_handler[i].event_type && + g_mgmt_event_handler[i].handler) { + g_mgmt_event_handler[i].handler(data, in_buf, in_size, + out_buf, out_size); + return; + } + } + + *out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + sdk_warn(SSS_TO_DEV(data), "Unsupported mgmt event %u.\n", event_type); +} + +static int sss_hwdev_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_mbx(hwdev); + if (ret != 0) + return ret; + + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MBX_FROM_FUNC, sss_recv_mbx_aeq_handler); + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MSG_FROM_MGMT, sss_mgmt_msg_aeqe_handler); + + set_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + + return 0; +} + +static void sss_hwdev_deinit_mbx(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MBX_FROM_FUNC); + + if (!SSS_IS_VF(hwdev)) { + sss_unregister_pf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + } else { + sss_unregister_vf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + } + + sss_hwif_deinit_mbx(hwdev); +} + +static int sss_chip_get_global_attr(struct sss_hwdev *hwdev) +{ + int ret = 0; + struct sss_cmd_get_glb_attr attr_cmd = {0}; + u16 out_len = sizeof(attr_cmd); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &attr_cmd, sizeof(attr_cmd), &attr_cmd, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr_cmd)) { + sdk_err(((struct sss_hwdev *)hwdev)->dev_hdl, + "Fail to get global attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr_cmd.head.state, out_len); + return -EIO; + } + + memcpy(&hwdev->glb_attr, &attr_cmd.attr, sizeof(hwdev->glb_attr)); + + return 0; +} + +static int sss_chip_get_feature(struct sss_hwdev *hwdev) +{ + int i; + int ret; + u64 feature[SSS_MAX_FEATURE_QWORD] = {SSS_DRV_FEATURE_DEF, 0, 0, 0}; + + ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_GET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get comm feature\n"); + return ret; + } + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_PPF) + feature[0] |= SSS_COMM_F_CHANNEL_DETECT; + + for (i = 0; i < SSS_MAX_FEATURE_QWORD; i++) + hwdev->features[i] &= feature[i]; + + return 0; +} + +static int sss_get_global_info(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_chip_get_board_info(hwdev, &hwdev->board_info); + if (ret != 0) + return ret; + + ret = sss_chip_get_feature(hwdev); + 
if (ret != 0) + return ret; + + ret = sss_chip_get_global_attr(hwdev); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_hwdev_deinit_adm(struct sss_hwdev *hwdev) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return; + + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_unregister_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + + sss_hwif_deinit_adm(hwdev); +} + +static int sss_hwdev_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = sss_hwif_init_adm(hwdev); + if (ret != 0) + return ret; + + sss_register_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM, hwdev, + sss_pf_handle_mgmt_event); + + set_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + + return 0; +} + +static int sss_chip_set_dma_attr_table(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_dma_attr_config attr = {0}; + u16 out_len = sizeof(attr); + + attr.ph = SSS_PCIE_PH_DISABLE; + attr.at = SSS_PCIE_AT_DISABLE; + attr.st = SSS_PCIE_ST_DISABLE; + attr.no_snooping = SSS_PCIE_SNOOP; + attr.tph_en = SSS_PCIE_TPH_DISABLE; + attr.func_id = sss_get_global_func_id(hwdev); + attr.entry_id = SSS_PCIE_MSIX_ATTR_ENTRY; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_DMA_ATTR, &attr, sizeof(attr), + &attr, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr)) { + sdk_err(hwdev->dev_hdl, + "Fail to set dma attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_chip_init_dma_attr(struct sss_hwdev *hwdev) +{ + u32 set; + u32 get; + u32 dst; + + set = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR); + set = SSS_CLEAR_DMA_ATTR_INDIR_ID(set, ID); + set |= SSS_SET_DMA_ATTR_INDIR_ID(SSS_PCIE_MSIX_ATTR_ENTRY, ID); + + sss_chip_write_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR, set); + + /* make sure reset dma attr */ + wmb(); + + dst = SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_TPH_DISABLE, TPH_EN) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_SNOOP, NO_SNOOPING) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_ST_DISABLE, ST) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_AT_DISABLE, AT) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_PH_DISABLE, PH); + get = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_TBL_ADDR); + + if (get == dst) + return 0; + + return sss_chip_set_dma_attr_table(hwdev); +} + +static void sss_chip_set_pf_state(struct sss_hwdev *hwdev) +{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_ACTIVE_FLAG); +} + +static void sss_chip_reset_pf_state(struct sss_hwdev *hwdev) +{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_INIT); +} + +static int sss_init_basic_mgmt_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_aeq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init comm aeqs\n"); + return ret; + } + + ret = sss_hwdev_init_mbx(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mbx\n"); + goto init_mbx_err; + } + + ret = sss_init_aeq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeqs msix attr\n"); + goto init_aeq_msix_attr_err; + } + + return 0; + +init_aeq_msix_attr_err: + sss_hwdev_deinit_mbx(hwdev); + +init_mbx_err: + sss_hwif_deinit_aeq(hwdev); + + return ret; +} + +static void sss_free_base_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_hwdev_deinit_mbx(hwdev); + sss_hwif_deinit_aeq(hwdev); +} + +int sss_init_mgmt_channel(struct sss_hwdev 
*hwdev) +{ + int ret; + + /* init aeq, mbx */ + ret = sss_init_basic_mgmt_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init basic mgmt channel\n"); + return ret; + } + + ret = sss_chip_reset_function(hwdev, sss_get_global_func_id(hwdev), + SSS_COMM_RESET_TYPE, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to reset func\n"); + goto out; + } + + ret = sss_get_global_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev attr\n"); + goto out; + } + + ret = sss_hwdev_init_adm(hwdev); + if (ret != 0) + goto out; + + ret = sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + true, SSS_CHANNEL_COMM); + if (ret != 0) + goto set_use_state_err; + + ret = sss_chip_init_dma_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init dma attr table\n"); + goto init_dma_attr_err; + } + + ret = sss_init_ctrlq_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq channel\n"); + goto init_ctrlq_channel_err; + } + + sss_chip_set_pf_state(hwdev); + + ret = sss_aeq_register_swe_cb(hwdev, hwdev, SSS_STL_EVENT, sss_sw_aeqe_handler); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, + "Fail to register sw aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + return 0; + +register_ucode_aeqe_err: + sss_chip_reset_pf_state(hwdev); + sss_deinit_ctrlq_channel(hwdev); + +init_ctrlq_channel_err: +init_dma_attr_err: + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + +set_use_state_err: + sss_hwdev_deinit_adm(hwdev); + +out: + sss_free_base_mgmt_channel(hwdev); + + return ret; +} + +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_aeq_unregister_swe_cb(hwdev, SSS_STL_EVENT); + + sss_chip_reset_pf_state(hwdev); + + sss_deinit_ctrlq_channel(hwdev); + + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + + sss_hwdev_deinit_adm(hwdev); + + sss_free_base_mgmt_channel(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h new file mode 100644 index 00000000000000..f8ab14532b73dc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_CHANNEL_H +#define SSS_HWDEV_MGMT_CHANNEL_H + +#include "sss_hwdev.h" + +#define SSS_STACK_DATA_LEN 1024 +#define SSS_XREGS_NUM 31 +#define SSS_MPU_LASTWORD_SIZE 1024 + +struct sss_watchdog_info { + struct sss_mgmt_msg_head head; + + u32 cur_time_h; + u32 cur_time_l; + u32 task_id; + u32 rsvd; + + u64 pc; + u64 elr; + u64 spsr; + u64 far; + u64 esr; + u64 xzr; + u64 x30; + u64 x29; + u64 x28; + u64 x27; + u64 x26; + u64 x25; + u64 x24; + u64 x23; + u64 x22; + u64 x21; + u64 x20; + u64 x19; + u64 x18; + u64 x17; + u64 x16; + u64 x15; + u64 x14; + u64 x13; + u64 x12; + u64 x11; + u64 x10; + u64 x09; + u64 x08; + u64 x07; + u64 x06; + u64 x05; + u64 x04; + u64 x03; + u64 x02; + u64 x01; + u64 x00; + + u64 stack_top; + u64 stack_bottom; + u64 sp; + u32 cur_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 stack_data[SSS_STACK_DATA_LEN]; +}; + +struct sss_cpu_tick { + u32 tick_cnt_h; /* The cycle count higher 32 bits */ + u32 tick_cnt_l; /* The cycle count lower 32 bits */ +}; + +struct sss_ax_exc_reg_info { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 sctlr; + u64 vbar; + u64 current_el; + u64 sp; + u64 elr; + u64 spsr; + u64 far_r; + 
u64 esr; + u64 xzr; + u64 xregs[SSS_XREGS_NUM]; /* 0~30: x30~x0 */ +}; + +struct sss_exc_info { + char os_ver[48]; /* OS version */ + char app_ver[64]; /* Product version */ + u32 exc_cause; /* Cause of exception */ + u32 thread_type; /* The thread type before the exception */ + u32 thread_id; /* Thread PID before exception */ + u16 byte_order; /* Byte order */ + u16 cpu_type; /* CPU type */ + u32 cpu_id; /* CPU ID */ + struct sss_cpu_tick cpu_tick; /* CPU Tick */ + u32 nest_cnt; /* The exception nested count */ + u32 fatal_errno; /* Fatal error code */ + u64 uw_sp; /* The stack pointer before the exception */ + u64 stack_bottom; /* Bottom of the stack before the exception */ + + /* The in-core register context information,*/ + /* 82\57 must be at 152 bytes; if it has changed, */ + /* the OS_EXC_REGINFO_OFFSET macro in sre_platform.eh must be updated */ + struct sss_ax_exc_reg_info reg_info; +}; + +struct sss_lastword_info { + struct sss_mgmt_msg_head head; + struct sss_exc_info stack_info; + + /* Stack details, Actual stack size(<=1024) */ + u32 stack_actlen; + + /* More than 1024, it will be truncated */ + u8 stack_data[SSS_MPU_LASTWORD_SIZE]; +}; + +int sss_init_mgmt_channel(struct sss_hwdev *hwdev); +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c new file mode 100644 index 00000000000000..b500cc16a0d49a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_svc_cap.h" +#include "sss_hwif_irq.h" +#include "sss_hwdev_mgmt_info.h" + +static int sss_init_ceq_info(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + struct sss_eq_cfg *ceq = NULL; + + ceq_info->ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + ceq_info->remain_ceq_num = ceq_info->ceq_num; + mutex_init(&ceq_info->eq_mutex); + + sdk_info(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0x%x, remain_ceq_num = 0x%x\n", + ceq_info->ceq_num, ceq_info->remain_ceq_num); + + if (ceq_info->ceq_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0\n"); + return -EFAULT; + } + + ceq = kcalloc(ceq_info->ceq_num, sizeof(*ceq), GFP_KERNEL); + if (!ceq) + return -ENOMEM; + + for (i = 0; i < ceq_info->ceq_num; i++) { + ceq[i].id = i + 1; + ceq[i].free = SSS_CFG_FREE; + ceq[i].type = SSS_SERVICE_TYPE_MAX; + } + ceq_info->eq = ceq; + + return 0; +} + +static void sss_deinit_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + + kfree(ceq_info->eq); +} + +int sss_init_mgmt_info(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mgmt_info *mgmt_info; + + mgmt_info = kzalloc(sizeof(*mgmt_info), GFP_KERNEL); + if (!mgmt_info) + return -ENOMEM; + + mgmt_info->hwdev = hwdev; + hwdev->mgmt_info = mgmt_info; + + ret = sss_init_ceq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq info, ret: %d\n", ret); + goto init_ceq_info_err; + } + + ret = sss_init_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init irq info, ret: %d\n", ret); + goto init_irq_info_err; + } + + return 0; + +init_irq_info_err: + sss_deinit_ceq_info(hwdev); + +init_ceq_info_err: + kfree(mgmt_info); + 
hwdev->mgmt_info = NULL; + + return ret; +} + +void sss_deinit_mgmt_info(struct sss_hwdev *hwdev) +{ + sss_deinit_irq_info(hwdev); + sss_deinit_ceq_info(hwdev); + + kfree(hwdev->mgmt_info); + hwdev->mgmt_info = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h new file mode 100644 index 00000000000000..78beeba092afec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_INFO_H +#define SSS_HWDEV_MGMT_INFO_H + +#include "sss_hwdev.h" + +int sss_init_mgmt_info(struct sss_hwdev *dev); +void sss_deinit_mgmt_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c new file mode 100644 index 00000000000000..0f6f2fcacb7ba4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_aeq.h" + +#define SSS_ADM_MSG_ELEM_DESC_SIZE 8 +#define SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE 8 +#define SSS_ADM_MSG_ELEM_WB_ADDR_SIZE 8 + +#define SSS_ADM_MSG_ELEM_ALIGNMENT 8 + +#define SSS_ADM_MSG_STATE_TIMEOUT 10000 + +/* adm_msg_state header */ +#define SSS_ADM_MSG_STATE_HEAD_VALID_SHIFT 0 +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_SHIFT 16 + +#define SSS_ADM_MSG_STATE_HEAD_VALID_MASK 0xFFU +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_MASK 0xFFU + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define SSS_ADM_MSG_STATE_HEAD_GET(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_HEAD_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_HEAD_##member##_MASK) + +enum sss_adm_msg_data_format { + SSS_SGL_TYPE = 1, +}; + +enum sss_adm_msg_opt { + SSS_ADM_MSG_WRITE = 0, + SSS_ADM_MSG_READ = 1, +}; + +enum sss_adm_msg_bypass { + SSS_NO_BYPASS = 0, + SSS_BYPASS = 1, +}; + +enum sss_adm_msg_reply_aeq { + SSS_NO_TRIGGER = 0, + SSS_TRIGGER = 1, +}; + +enum sss_adm_msg_chn_code { + SSS_ADM_MSG_CHANNEL_0 = 0, +}; + +enum sss_adm_msg_chn_rsvd { + SSS_VALID_MSG_CHANNEL = 0, + SSS_INVALID_MSG_CHANNEL = 1, +}; + +#define SSS_ADM_MSG_DESC_LEN 7 + +struct sss_msg_head { + u8 state; + u8 version; + u8 reply_aeq_num; + u8 rsvd0[5]; +}; + +#define SSS_MGMT_MSG_SIZE_MIN 20 +#define SSS_MGMT_MSG_SIZE_STEP 16 +#define SSS_MGMT_MSG_RSVD_FOR_DEV 8 + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SYNC_MSG_ID_MASK 0x7 +#define SSS_SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) +#define SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt) \ + ((pf_to_mgmt)->sync_msg_id = \ + ((pf_to_mgmt)->sync_msg_id + 1) & SSS_SYNC_MSG_ID_MASK) + +#define SSS_MGMT_MSG_TIMEOUT 20000 /* millisecond */ + +#define SSS_MSG_CB_USLEEP_MIN 900 +#define SSS_MSG_CB_USLEEP_MAX 1000 + +#define SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_len, mod, cmd, msg_id) \ + (SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | \ + SSS_SET_MSG_HEADER(mod, MODULE) | \ + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | \ + SSS_SET_MSG_HEADER(SSS_MSG_ACK, 
NO_ACK) | \ + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | \ + SSS_SET_MSG_HEADER(0, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER(SSS_DIRECT_SEND_MSG, DIRECTION) | \ + SSS_SET_MSG_HEADER(cmd, CMD) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MGMT, SOURCE) | \ + SSS_SET_MSG_HEADER(func_id, SRC_GLB_FUNC_ID) | \ + SSS_SET_MSG_HEADER(msg_id, MSG_ID)) + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define SSSNIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & SSSNIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + SSSNIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define SSSNIC_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_##member##_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + ((u16)(((val) >> SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) + +static u8 sss_xor_chksum_set(void *data) +{ + int id; + u8 checksum = 0; + u8 *val = data; + + for (id = 0; id < SSS_ADM_MSG_DESC_LEN; id++) + checksum ^= val[id]; + + return checksum; +} + +static void sss_chip_set_pi(struct sss_adm_msg *adm_msg) +{ + enum sss_adm_msg_type msg_type = adm_msg->msg_type; + struct sss_hwif *hwif = SSS_TO_HWDEV(adm_msg)->hwif; + u32 hw_pi_addr = SSS_CSR_ADM_MSG_PI_ADDR(msg_type); + + sss_chip_write_reg(hwif, hw_pi_addr, adm_msg->pi); +} + +static u32 sss_chip_get_ci(struct sss_adm_msg *adm_msg) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + return SSS_GET_ADM_MSG_STATE(val, CI); +} + +static void sss_dump_adm_msg_reg(struct sss_adm_msg *adm_msg) +{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + u32 addr; + u32 val; + u16 pci_cmd = 0; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + sdk_err(dev, "Msg type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + adm_msg->msg_type, SSS_GET_ADM_MSG_STATE(val, CPLD_ERR), + SSS_GET_ADM_MSG_STATE(val, CHKSUM_ERR), + SSS_GET_ADM_MSG_STATE(val, FSM)); + + sdk_err(dev, "Adm msg hw current ci: 0x%x\n", + SSS_GET_ADM_MSG_STATE(val, CI)); + + addr = SSS_CSR_ADM_MSG_PI_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + sdk_err(dev, "Adm msg hw current pi: 0x%x\n", val); + pci_read_config_word(SSS_TO_HWDEV(adm_msg)->pcidev_hdl, PCI_COMMAND, &pci_cmd); + sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd); +} + +static int sss_adm_msg_busy(struct sss_adm_msg *adm_msg) +{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + u64 resp_header; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_MULTI_READ: + case 
SSS_ADM_MSG_POLL_READ: + resp_header = be64_to_cpu(ctx->reply_fmt->head); + if (ctx->state && !SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctx->state, adm_msg->pi, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + adm_msg->ci = sss_chip_get_ci(adm_msg); + + if (adm_msg->ci == SSS_MASK_ID(adm_msg, adm_msg->pi + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + adm_msg->msg_type, adm_msg->ci, + adm_msg->pi); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", adm_msg->msg_type); + return -EINVAL; + } + + return 0; +} + +static void sss_prepare_elem_ctrl(u64 *elem_ctrl, enum sss_adm_msg_type msg_type) +{ + u64 control; + u8 chksum; + u16 elem_len = 0; + + switch (msg_type) { + case SSS_ADM_MSG_POLL_READ: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + SSS_ADM_MSG_ELEM_WB_ADDR_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + default: + break; + } + + control = SSS_ADM_MSG_ELEM_CTRL_SET(SSS_SIZE_TO_8B(elem_len), ELEM_LEN) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = sss_xor_chksum_set(&control); + + control |= SSS_ADM_MSG_ELEM_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *elem_ctrl = cpu_to_be64(control); +} + +static void sss_prepare_elem_desc(struct sss_adm_msg *adm_msg, + u8 node_id, u16 cmd_size) +{ + u32 priv; + struct sss_adm_msg_elem *elem = adm_msg->now_node; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + priv = SSS_READ_ADM_MSG_PRIV_DATA(adm_msg->msg_type, ctx->store_pi); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_READ, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case SSS_ADM_MSG_POLL_WRITE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_NO_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + + break; + default: + sdk_err(((struct sss_hwdev *)adm_msg->hwdev)->dev_hdl, "Unknown Chain type: %d\n", + adm_msg->msg_type); + return; + } + + elem->desc |= SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_CHANNEL_0, MSG_CHANNEL) | + SSS_ADM_MSG_DESC_SET(SSS_VALID_MSG_CHANNEL, 
MSG_VALID); + + elem->desc |= SSS_ADM_MSG_DESC_SET(node_id, DEST) | + SSS_ADM_MSG_DESC_SET(SSS_SIZE_TO_4B(cmd_size), SIZE); + + elem->desc |= SSS_ADM_MSG_DESC_SET(sss_xor_chksum_set(&elem->desc), XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + elem->desc = cpu_to_be64(elem->desc); +} + +static void sss_prepare_elem_ctx(struct sss_adm_msg *adm_msg, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem_ctx *elem_ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + memcpy(elem_ctx->adm_msg_vaddr, cmd, cmd_size); +} + +static void sss_prepare_elem(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem *now_node = adm_msg->now_node; + + sss_prepare_elem_ctrl(&now_node->control, adm_msg->msg_type); + sss_prepare_elem_desc(adm_msg, node_id, cmd_size); + sss_prepare_elem_ctx(adm_msg, cmd, cmd_size); +} + +static inline void sss_adm_msg_increase_pi(struct sss_adm_msg *adm_msg) +{ + adm_msg->pi = SSS_MASK_ID(adm_msg, adm_msg->pi + 1); +} + +static void sss_issue_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_set_pi(adm_msg); +} + +static void sss_update_adm_msg_state(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_state *wb_state; + enum sss_adm_msg_type msg_type; + u64 status_header; + u32 desc_buf; + + wb_state = adm_msg->wb_state; + + desc_buf = be32_to_cpu(wb_state->desc_buf); + if (SSS_GET_ADM_MSG_STATE(desc_buf, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_state->head); + msg_type = SSS_ADM_MSG_STATE_HEAD_GET(status_header, MSG_ID); + if (msg_type >= SSS_ADM_MSG_MAX) + return; + + if (msg_type != adm_msg->msg_type) + return; + + adm_msg->ci = SSS_GET_ADM_MSG_STATE(desc_buf, CI); +} + +static enum sss_process_ret sss_wait_for_state_poll_handler(void *priv_data) +{ + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + sss_update_adm_msg_state(adm_msg); + /* SYNC ADM MSG cmd should start after prev cmd finished */ + if (adm_msg->ci == adm_msg->pi) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret check_cmd_resp_handler(void *priv_data) +{ + struct sss_adm_msg_elem_ctx *ctxt = priv_data; + u64 resp_header; + u8 resp_status; + + if (!SSS_TO_HWDEV(ctxt)->chip_present_flag) { + pr_err("Fail to resp chip present"); + return SSS_PROCESS_ERR; + } + + resp_header = be64_to_cpu(ctxt->reply_fmt->head); + rmb(); /* read the latest header */ + + if (SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + resp_status = SSSNIC_API_CMD_RESP_HEAD_GET(resp_header, STATUS); + if (resp_status) { + pr_err("Api chain response data err, status: %u\n", + resp_status); + return SSS_PROCESS_ERR; + } + + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_state_poll(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_wait_for_state_poll_handler, + SSS_ADM_MSG_STATE_TIMEOUT, 100); /* wait 100 us once */ +} + +static int wait_for_resp_polling(struct sss_adm_msg_elem_ctx *ctx) +{ + return sss_check_handler_timeout(ctx, check_cmd_resp_handler, + POLLING_COMPLETION_TIMEOUT_DEFAULT, + USEC_PER_MSEC); +} + +static void copy_resp_data(struct sss_adm_msg_elem_ctx *ctx, void *ack, + u16 ack_size) +{ + struct sss_adm_msg_reply_fmt *resp = ctx->reply_fmt; + + memcpy(ack, &resp->reply, ack_size); + ctx->state = 0; +} + +static int sss_wait_for_adm_msg_completion(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem_ctx *ctx, + void *ack, u16 ack_size) +{ + int ret = 0; + + 
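+	/*
+	 * Descriptive note on the dispatch below: the completion strategy
+	 * depends on the message type. Poll reads spin on the response header
+	 * written back by the hardware, poll and sync writes poll the chip
+	 * consumer index until it reaches the producer index, and async
+	 * writes return without waiting.
+	 */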
switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + ret = wait_for_resp_polling(ctx); + if (ret == 0) + copy_resp_data(ctx, ack, ack_size); + else + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "API CMD poll response timeout\n"); + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + ret = sss_wait_for_state_poll(adm_msg); + break; + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + /* No need to wait */ + break; + default: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unknown API CMD Chain type: %d\n", + adm_msg->msg_type); + ret = -EINVAL; + } + + if (ret) { + sss_dump_adm_msg_reg(adm_msg); + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Adm msg wait timeout,type :%d\n", + adm_msg->msg_type); + } + + return ret; +} + +static inline void sss_update_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + ctx->state = 1; + ctx->store_pi = adm_msg->pi; + if (ctx->reply_fmt) { + ctx->reply_fmt->head = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +static void sss_adm_msg_lock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_lock(&adm_msg->async_lock); + else + down(&adm_msg->sem); +} + +static void sss_adm_msg_unlock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_unlock(&adm_msg->async_lock); + else + up(&adm_msg->sem); +} + +static int sss_adm_msg_io(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + + sss_adm_msg_lock(adm_msg); + + ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + if (sss_adm_msg_busy(adm_msg)) { + sss_adm_msg_unlock(adm_msg); + return -EBUSY; + } + + sss_update_adm_msg_ctx(adm_msg); + + sss_prepare_elem(adm_msg, node_id, cmd, cmd_size); + + sss_adm_msg_increase_pi(adm_msg); + + wmb(); /* make sure issue correctly the command */ + + sss_issue_adm_msg(adm_msg); + + adm_msg->now_node = adm_msg->elem_ctx[adm_msg->pi].elem_vaddr; + + sss_adm_msg_unlock(adm_msg); + + return sss_wait_for_adm_msg_completion(adm_msg, ctx, ack, ack_size); +} + +static int sss_adm_msg_write(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, cmd_size, NULL, 0); +} + +static int sss_adm_msg_read(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, size, ack, ack_size); +} + +static void sss_set_adm_event_flag(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_state = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static u16 sss_align_adm_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(SSS_MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > SSS_MGMT_MSG_SIZE_MIN) + msg_size = SSS_MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - SSS_MGMT_MSG_SIZE_MIN), SSS_MGMT_MSG_SIZE_STEP); + else + msg_size = SSS_MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +static void sss_encapsulate_adm_msg(u8 *adm_msg, u64 *header, + const void *body, int body_len) +{ + u8 *adm_msg_new = adm_msg; + + memset(adm_msg_new, 0, SSS_MGMT_MSG_RSVD_FOR_DEV); + + adm_msg_new += SSS_MGMT_MSG_RSVD_FOR_DEV; + memcpy(adm_msg_new, header, sizeof(*header)); + + adm_msg_new += sizeof(*header); + memcpy(adm_msg_new, body, 
(size_t)(u32)body_len); +} + +#define SSS_MAX_PF_MGMT_BUF_MAX 2048L + +int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg *adm_mag = NULL; + + if (!hwdev || !cmd || (ack_size && !ack) || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_READ]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_read(adm_mag, dest, cmd, size, ack, ack_size); +} + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg *adm_mag = NULL; + + if (!hwdev || !size || !cmd || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_WRITE]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_write(adm_mag, dest, cmd, size); +} + +#define SSS_MSG_NO_RESP 0xFFFF + +static int sss_send_adm_msg(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg_body, u16 msg_body_len) +{ + struct sss_hwif *hwif = SSS_TO_HWDEV(pf_to_mgmt)->hwif; + void *msg_buf = pf_to_mgmt->sync_buf; + u16 adm_msg_len = sss_align_adm_msg_len(msg_body_len); + u32 func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + u8 node_id = SSS_MGMT_CPU_NODE_ID(SSS_TO_HWDEV(pf_to_mgmt)); + u64 header; + struct sss_adm_msg *adm_mag; + + if (sss_get_dev_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (adm_msg_len > SSS_MSG_TO_MGMT_LEN_MAX) + return -EFAULT; + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_START); + + header = SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_body_len, mod, + cmd, SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt)); + + sss_encapsulate_adm_msg((u8 *)msg_buf, &header, msg_body, msg_body_len); + + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_WRITE_TO_MGMT_MODULE]; + + return sss_adm_msg_write(adm_mag, node_id, msg_buf, adm_msg_len); +} + +static inline void sss_check_msg_body(u8 mod, void *buf_in) +{ + struct sss_msg_head *msg_head = NULL; + + /* set aeq fix num to 3, need to ensure response aeq id < 3 */ + if (mod == SSS_MOD_TYPE_COMM || mod == SSS_MOD_TYPE_L2NIC) { + msg_head = buf_in; + + if (msg_head->reply_aeq_num >= SSS_MAX_AEQ) + msg_head->reply_aeq_num = 0; + } +} + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + struct sss_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + ulong timeo; + int err; + ulong ret; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + sss_check_msg_body(mod, buf_in); + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_buf */ + down(&pf_to_mgmt->sync_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg; + recv_done = &recv_msg->done; + + init_completion(recv_done); + + err = sss_send_adm_msg(pf_to_mgmt, mod, cmd, buf_in, in_size); + if (err != 0) { + sdk_err(dev, "Fail to send adm msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_FAIL); + 
goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? timeout : SSS_MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (ret == 0) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_dump_aeq_info((struct sss_hwdev *)hwdev); + err = -ETIMEDOUT; + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (pf_to_mgmt->event_state == SSS_ADM_EVENT_TIMEOUT) { + spin_unlock(&pf_to_mgmt->sync_event_lock); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + spin_unlock(&pf_to_mgmt->sync_event_lock); + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_END); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) { + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->buf_len) { + sdk_err(dev, + "Invalid resp msg len: %u out of range: %u, mod %d, cmd %u\n", + recv_msg->buf_len, *out_size, mod, cmd); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->buf_len) + memcpy(buf_out, recv_msg->buf, recv_msg->buf_len); + + *out_size = recv_msg->buf_len; + } + +unlock_sync_msg: + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + + return err; +} + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return -EFAULT; + + mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return -EINVAL; + + mgmt_msg->recv_data[mod_type] = data; + mgmt_msg->recv_handler[mod_type] = handler; + + set_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + return 0; +} +EXPORT_SYMBOL(sss_register_mgmt_msg_handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return; + + mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return; + + clear_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + while (test_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod_type])) + usleep_range(SSS_MSG_CB_USLEEP_MIN, SSS_MSG_CB_USLEEP_MAX); + + mgmt_msg->recv_data[mod_type] = NULL; + mgmt_msg->recv_handler[mod_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_mgmt_msg_handler); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h new file mode 100644 index 00000000000000..54cfe231e63130 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_H +#define SSS_HWIF_ADM_H + +#include +int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size); + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h new file mode 100644 index 00000000000000..fc0d99e326adec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_COMMON_H +#define SSS_HWIF_ADM_COMMON_H + +#define SSS_ADM_MSG_AEQ_ID 2 + +#define SSS_WRITE_ADM_MSG_PRIV_DATA(id) (((u8)(id)) << 16) +#define SSS_READ_ADM_MSG_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) + +#define SSS_MASK_ID(adm_msg, id) \ + ((id) & ((adm_msg)->elem_num - 1)) + +#define SSS_SIZE_TO_4B(size) \ + (ALIGN((u32)(size), 4U) >> 2) +#define SSS_SIZE_TO_8B(size) \ + (ALIGN((u32)(size), 8U) >> 3) + +/* ADM_STATUS_0 CSR: 0x0030+adm msg id*0x080 */ +#define SSS_ADM_MSG_STATE_CI_MASK 0xFFFFFFU +#define SSS_ADM_MSG_STATE_CI_SHIFT 0 + +#define SSS_ADM_MSG_STATE_FSM_MASK 0xFU +#define SSS_ADM_MSG_STATE_FSM_SHIFT 24 + +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_MASK 0x3U +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_SHIFT 28 + +#define SSS_ADM_MSG_STATE_CPLD_ERR_MASK 0x1U +#define SSS_ADM_MSG_STATE_CPLD_ERR_SHIFT 30 + +#define SSS_GET_ADM_MSG_STATE(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_##member##_MASK) + +/* adm_msg_elem.desc structure */ +#define SSS_ADM_MSG_DESC_SGL_TYPE_SHIFT 0 +#define SSS_ADM_MSG_DESC_RD_WR_SHIFT 1 +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_SHIFT 2 +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_SHIFT 3 +#define SSS_ADM_MSG_DESC_MSG_VALID_SHIFT 4 +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_SHIFT 6 +#define SSS_ADM_MSG_DESC_PRIV_DATA_SHIFT 8 +#define SSS_ADM_MSG_DESC_DEST_SHIFT 32 +#define SSS_ADM_MSG_DESC_SIZE_SHIFT 40 +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_DESC_SGL_TYPE_MASK 0x1U +#define SSS_ADM_MSG_DESC_RD_WR_MASK 0x1U +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_MASK 0x1U +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_DESC_MSG_VALID_MASK 0x3U +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_MASK 0x3U +#define SSS_ADM_MSG_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define SSS_ADM_MSG_DESC_DEST_MASK 0x1FU +#define SSS_ADM_MSG_DESC_SIZE_MASK 0x7FFU +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_DESC_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_DESC_##member##_MASK) << \ + SSS_ADM_MSG_DESC_##member##_SHIFT) + +/* adm_msg_elem structure */ +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_SHIFT 0 +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_ELEM_CTRL_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_ELEM_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_ELEM_CTRL_##member##_SHIFT) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c new file mode 100644 index 00000000000000..60343ebcd5b387 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_adm_init.h" +#include "sss_hwif_mgmt_common.h" + +/* 
ADM_MSG_REQ CSR: 0x0020+adm_id*0x080 */ +#define SSS_ADM_MSG_REQ_RESTART_SHIFT 1 +#define SSS_ADM_MSG_REQ_WB_TRIGGER_SHIFT 2 + +#define SSS_ADM_MSG_REQ_RESTART_MASK 0x1U +#define SSS_ADM_MSG_REQ_WB_TRIGGER_MASK 0x1U + +#define SSS_SET_ADM_MSG_REQ(val, member) \ + (((val) & SSS_ADM_MSG_REQ_##member##_MASK) << \ + SSS_ADM_MSG_REQ_##member##_SHIFT) + +#define SSS_GET_ADM_MSG_REQ(val, member) \ + (((val) >> SSS_ADM_MSG_REQ_##member##_SHIFT) & \ + SSS_ADM_MSG_REQ_##member##_MASK) + +#define SSS_CLEAR_ADM_MSG_REQ(val, member) \ + ((val) & (~(SSS_ADM_MSG_REQ_##member##_MASK \ + << SSS_ADM_MSG_REQ_##member##_SHIFT))) + +/* ADM_MSG_CTRL CSR: 0x0014+adm_id*0x080 */ +#define SSS_ADM_MSG_CTRL_RESTART_EN_SHIFT 1 +#define SSS_ADM_MSG_CTRL_XOR_ERR_SHIFT 2 +#define SSS_ADM_MSG_CTRL_AEQE_EN_SHIFT 4 +#define SSS_ADM_MSG_CTRL_AEQ_ID_SHIFT 8 +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_SHIFT 28 +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_SHIFT 30 + +#define SSS_ADM_MSG_CTRL_RESTART_EN_MASK 0x1U +#define SSS_ADM_MSG_CTRL_XOR_ERR_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQ_ID_MASK 0x3U +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_MASK 0x3U +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_MASK 0x3U + +#define SSS_SET_ADM_MSG_CTRL(val, member) \ + (((val) & SSS_ADM_MSG_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_CTRL_##member##_SHIFT) + +#define SSS_CLEAR_ADM_MSG_CTRL(val, member) \ + ((val) & (~(SSS_ADM_MSG_CTRL_##member##_MASK \ + << SSS_ADM_MSG_CTRL_##member##_SHIFT))) + +#define SSS_ADM_MSG_BUF_SIZE 2048ULL + +#define SSS_ADM_MSG_NODE_ALIGN_SIZE 512ULL +#define SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE 64ULL + +#define SSS_ADM_MSG_REPLY_ALIGNMENT 128ULL + +#define SSS_ADM_MSG_TIMEOUT 10000 + +#define SSS_ADM_MSG_ELEM_SIZE_SHIFT 6U + +#define SSS_ADM_MSG_ELEM_NUM 32 +#define SSS_ADM_MSG_ELEM_SIZE 128 +#define SSS_ADM_MSG_REPLY_DATA_SIZE 128 + +#define SSS_MGMT_WQ_NAME "sssnic_mgmt" + +#define SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_paddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_vaddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_paddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_vaddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_paddr_base + (adm_msg)->reply_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_vaddr_base + (adm_msg)->reply_size_align * (elem_id)) + +typedef void (*sss_alloc_elem_buf_handler_t)(struct sss_adm_msg *adm_msg, u32 elem_id); + +struct sss_adm_msg_attr { + struct sss_hwdev *hwdev; + enum sss_adm_msg_type msg_type; + + u32 elem_num; + u16 reply_size; + u16 elem_size; +}; + +static enum sss_process_ret sss_adm_msg_reset_handler(void *priv_data) +{ + u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (!SSS_GET_ADM_MSG_REQ(val, RESTART)) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret sss_adm_msg_ready_handler(void *priv_data) +{ + u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return 
SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (SSS_GET_ADM_MSG_STATE(val, CI) == adm_msg->ci) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static void sss_chip_clean_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, RESTART_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_ERR) & + SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_CHK_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_adm_msg_wb_addr(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_STATE_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_STATE_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_chip_reset_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + val = SSS_CLEAR_ADM_MSG_REQ(val, RESTART); + val |= SSS_SET_ADM_MSG_REQ(1, RESTART); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + return sss_check_handler_timeout(adm_msg, sss_adm_msg_reset_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_chip_init_elem_size(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + u32 size; + + addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + size = (u32)ilog2(adm_msg->elem_size >> SSS_ADM_MSG_ELEM_SIZE_SHIFT); + val |= SSS_SET_ADM_MSG_CTRL(0, AEQE_EN) | + SSS_SET_ADM_MSG_CTRL(size, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_elem_num(struct sss_adm_msg *adm_msg) +{ + u32 addr; + + addr = SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(adm_msg->msg_type); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, adm_msg->elem_num); +} + +static void sss_chip_init_elem_head(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_HEAD_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_HEAD_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_wait_adm_msg_ready(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_adm_msg_ready_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static int sss_chip_init_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_clean_adm_msg(adm_msg); + + sss_chip_set_adm_msg_wb_addr(adm_msg); + + if (sss_chip_reset_adm_msg(adm_msg) != 0) { + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Fail to restart adm cmd\n"); + return -EBUSY; + } + + sss_chip_init_elem_size(adm_msg); + sss_chip_set_elem_num(adm_msg); + sss_chip_init_elem_head(adm_msg); + + return sss_wait_adm_msg_ready(adm_msg); +} + +static void sss_init_ctx_buf_addr(struct sss_adm_msg *adm_msg, + 
u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + + ctx->adm_msg_vaddr = vaddr; + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->write.hw_msg_paddr = cpu_to_be64(paddr); +} + +static void sss_init_ctx_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + paddr = SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id); + vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->read.hw_wb_reply_paddr = cpu_to_be64(paddr); + ctx->reply_fmt = vaddr; + ctx->adm_msg_vaddr = &elem->read.hw_msg_paddr; +} + +static void sss_init_ctx_buf_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 buf_paddr; + void *buf_vaddr; + void *rsp_vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + rsp_vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + buf_paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + buf_vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->reply_fmt = rsp_vaddr; + ctx->adm_msg_vaddr = buf_vaddr; + elem->read.hw_msg_paddr = cpu_to_be64(buf_paddr); +} + +static void sss_alloc_reply_buf(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem *elem, u32 cell_idx) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * cell_idx); + resp_paddr = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * cell_idx; + + ctx = &adm_msg->elem_ctx[cell_idx]; + + ctx->reply_fmt = resp_vaddr; + elem->read.hw_wb_reply_paddr = cpu_to_be64(resp_paddr); +} + +static int sss_init_elem_ctx(struct sss_adm_msg *adm_msg, u32 elem_id) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + struct sss_adm_msg_elem *elem; + sss_alloc_elem_buf_handler_t handler[] = { + NULL, + NULL, + sss_init_ctx_buf_addr, + sss_init_ctx_reply_addr, + sss_init_ctx_buf_addr, + sss_init_ctx_buf_reply_addr, + sss_init_ctx_buf_addr + }; + elem = (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + + if (adm_msg->msg_type == SSS_ADM_MSG_MULTI_READ || + adm_msg->msg_type == SSS_ADM_MSG_POLL_READ) + sss_alloc_reply_buf(adm_msg, elem, elem_id); + + ctx = &adm_msg->elem_ctx[elem_id]; + ctx->elem_vaddr = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->hwdev = adm_msg->hwdev; + + if (adm_msg->msg_type >= ARRAY_LEN(handler)) + goto out; + + if (!handler[adm_msg->msg_type]) + goto out; + + handler[adm_msg->msg_type](adm_msg, elem_id); + + return 0; + +out: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unsupport adm msg type %u\n", adm_msg->msg_type); + return -EINVAL; +} + +static int sss_init_adm_msg_elem(struct sss_adm_msg *adm_msg) +{ + u32 i; + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem *elem = NULL; + struct sss_adm_msg_elem *pre_elt = NULL; + int ret; + + for (i = 0; i < adm_msg->elem_num; i++) { + ret = sss_init_elem_ctx(adm_msg, i); + if (ret != 0) + return ret; + + paddr = 
SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, i); + vaddr = SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, i); + + if (!pre_elt) { + adm_msg->head_node = vaddr; + adm_msg->head_elem_paddr = (dma_addr_t)paddr; + } else { + pre_elt->next_elem_paddr = cpu_to_be64(paddr); + } + + elem = vaddr; + elem->next_elem_paddr = 0; + + pre_elt = elem; + } + + elem->next_elem_paddr = cpu_to_be64(adm_msg->head_elem_paddr); + adm_msg->now_node = adm_msg->head_node; + + return 0; +} + +static int sss_alloc_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + size_t ctx_size; + + ctx_size = adm_msg->elem_num * sizeof(*adm_msg->elem_ctx); + + adm_msg->elem_ctx = kzalloc(ctx_size, GFP_KERNEL); + if (!adm_msg->elem_ctx) + return -ENOMEM; + + return 0; +} + +static void sss_free_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + kfree(adm_msg->elem_ctx); + adm_msg->elem_ctx = NULL; +} + +static int sss_alloc_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->wb_state = dma_zalloc_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + &adm_msg->wb_state_paddr, GFP_KERNEL); + if (!adm_msg->wb_state) { + sdk_err(dev_hdl, "Fail to alloc dma wb status\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + dma_free_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + adm_msg->wb_state, adm_msg->wb_state_paddr); +} + +static int sss_alloc_elem_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + size_t buf_size; + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->buf_size_align = ALIGN(SSS_ADM_MSG_BUF_SIZE, + SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE); + adm_msg->elem_size_align = ALIGN((u64)adm_msg->elem_size, + SSS_ADM_MSG_NODE_ALIGN_SIZE); + adm_msg->reply_size_align = ALIGN((u64)adm_msg->reply_size, + SSS_ADM_MSG_REPLY_ALIGNMENT); + buf_size = (adm_msg->buf_size_align + adm_msg->elem_size_align + + adm_msg->reply_size_align) * adm_msg->elem_num; + + ret = sss_dma_zalloc_coherent_align(dev_hdl, buf_size, SSS_ADM_MSG_NODE_ALIGN_SIZE, + GFP_KERNEL, &adm_msg->elem_addr); + if (ret != 0) { + sdk_err(dev_hdl, "Fail to alloc adm msg elem buffer\n"); + return ret; + } + + adm_msg->elem_vaddr_base = adm_msg->elem_addr.align_vaddr; + adm_msg->elem_paddr_base = adm_msg->elem_addr.align_paddr; + + adm_msg->reply_vaddr_base = (u8 *)((u64)adm_msg->elem_vaddr_base + + adm_msg->elem_size_align * adm_msg->elem_num); + adm_msg->reply_paddr_base = adm_msg->elem_paddr_base + + adm_msg->elem_size_align * adm_msg->elem_num; + + adm_msg->buf_vaddr_base = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * adm_msg->elem_num); + adm_msg->buf_paddr_base = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * adm_msg->elem_num; + + return 0; +} + +static void sss_free_elem_buf(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + sss_dma_free_coherent_align(dev_hdl, &adm_msg->elem_addr); +} + +static int sss_alloc_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + + ret = sss_alloc_adm_msg_ctx(adm_msg); + if (ret != 0) + return ret; + + ret = sss_alloc_adm_msg_wb_state(adm_msg); + if (ret != 0) + goto alloc_wb_err; + + ret = sss_alloc_elem_buf(adm_msg); + if (ret != 0) + goto alloc_elem_buf_err; + + return 0; + +alloc_elem_buf_err: + sss_free_adm_msg_wb_state(adm_msg); + +alloc_wb_err: + sss_free_adm_msg_ctx(adm_msg); + + return ret; +} + +static void sss_free_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + sss_free_elem_buf(adm_msg); + + 
sss_free_adm_msg_wb_state(adm_msg); + + sss_free_adm_msg_ctx(adm_msg); +} + +static void sss_init_adm_msg_param(struct sss_adm_msg *adm_msg, + struct sss_hwdev *hwdev, u8 msg_type) +{ + adm_msg->hwdev = hwdev; + adm_msg->elem_num = SSS_ADM_MSG_ELEM_NUM; + adm_msg->reply_size = SSS_ADM_MSG_REPLY_DATA_SIZE; + adm_msg->elem_size = SSS_ADM_MSG_ELEM_SIZE; + adm_msg->msg_type = msg_type; + adm_msg->pi = 0; + adm_msg->ci = 0; + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_lock_init(&adm_msg->async_lock); + else + sema_init(&adm_msg->sem, 1); +} + +static int create_adm_msg(struct sss_hwdev *hwdev, struct sss_adm_msg **adm_msg, u8 msg_type) +{ + struct sss_adm_msg *msg; + int ret; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + sss_init_adm_msg_param(msg, hwdev, msg_type); + + ret = sss_alloc_adm_msg_buf(msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg buf\n"); + return ret; + } + + ret = sss_init_adm_msg_elem(msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg elem\n"); + sss_free_adm_msg_buf(msg); + return ret; + } + + ret = sss_chip_init_adm_msg(msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n"); + sss_free_adm_msg_buf(msg); + return ret; + } + + *adm_msg = msg; + + return 0; +} + +static void sss_destroy_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_free_adm_msg_buf(adm_msg); + kfree(adm_msg); +} + +static int sss_init_adm_msg(struct sss_hwdev *hwdev, + struct sss_adm_msg **adm_msg) +{ + int ret; + u8 i; + u8 adm_msg_type; + void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return 0; + + for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; + adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++) { + ret = create_adm_msg(hwdev, &adm_msg[adm_msg_type], adm_msg_type); + if (ret) { + sdk_err(dev, "Failed to create adm msg %d\n", adm_msg_type); + goto create_adm_msg_err; + } + } + + return 0; + +create_adm_msg_err: + for (i = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; i < adm_msg_type; i++) + sss_destroy_adm_msg(hwdev->pf_to_mgmt->adm_msg[adm_msg_type]); + + return ret; +} + +static void sss_deinit_adm_msg(const struct sss_hwdev *hwdev, + struct sss_adm_msg **adm_msg) +{ + u8 adm_msg_type; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return; + + for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; + adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++) + sss_destroy_adm_msg(adm_msg[adm_msg_type]); +} + +static int sss_alloc_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg) +{ + struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg; + struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg; + + recv_msg->seq_id = SSS_MGMT_SEQ_ID_MAX; + resp_msg->seq_id = SSS_MGMT_SEQ_ID_MAX; + + recv_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!recv_msg->buf) + return -ENOMEM; + + resp_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!resp_msg->buf) + goto alloc_resp_msg_err; + + mgmt_msg->ack_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->ack_buf) + goto alloc_ack_buf_err; + + mgmt_msg->sync_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->sync_buf) + goto alloc_sync_buf_err; + + mgmt_msg->async_msg_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->async_msg_buf) + goto alloc_async_msg_buf_err; + + return 0; + +alloc_async_msg_buf_err: + kfree(mgmt_msg->sync_buf); + mgmt_msg->sync_buf = NULL; +alloc_sync_buf_err: + kfree(mgmt_msg->ack_buf); + mgmt_msg->ack_buf = NULL; + +alloc_ack_buf_err: + 
kfree(resp_msg->buf); + resp_msg->buf = NULL; + +alloc_resp_msg_err: + kfree(recv_msg->buf); + recv_msg->buf = NULL; + + return -ENOMEM; +} + +static void sss_free_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg) +{ + struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg; + struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg; + + kfree(mgmt_msg->async_msg_buf); + kfree(mgmt_msg->sync_buf); + kfree(mgmt_msg->ack_buf); + kfree(resp_msg->buf); + kfree(recv_msg->buf); +} + +int sss_hwif_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_msg_pf_to_mgmt *mgmt_msg; + + mgmt_msg = kzalloc(sizeof(*mgmt_msg), GFP_KERNEL); + if (!mgmt_msg) + return -ENOMEM; + + spin_lock_init(&mgmt_msg->async_msg_lock); + spin_lock_init(&mgmt_msg->sync_event_lock); + sema_init(&mgmt_msg->sync_lock, 1); + mgmt_msg->hwdev = hwdev; + hwdev->pf_to_mgmt = mgmt_msg; + + mgmt_msg->workq = create_singlethread_workqueue(SSS_MGMT_WQ_NAME); + if (!mgmt_msg->workq) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt workq\n"); + ret = -ENOMEM; + goto alloc_mgmt_wq_err; + } + + ret = sss_alloc_msg_buf(mgmt_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc msg buffer\n"); + goto alloc_msg_buf_err; + } + + ret = sss_init_adm_msg(hwdev, mgmt_msg->adm_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n"); + goto init_all_adm_err; + } + + return 0; + +init_all_adm_err: + sss_free_msg_buf(mgmt_msg); + +alloc_msg_buf_err: + destroy_workqueue(mgmt_msg->workq); + +alloc_mgmt_wq_err: + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; + + return ret; +} + +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = hwdev->pf_to_mgmt; + + destroy_workqueue(mgmt_msg->workq); + + sss_deinit_adm_msg(hwdev, mgmt_msg->adm_msg); + + sss_free_msg_buf(mgmt_msg); + + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; +} + +void sss_complete_adm_event(struct sss_hwdev *hwdev) +{ + struct sss_recv_msg *recv_msg = + &hwdev->pf_to_mgmt->recv_resp_msg; + + spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + if (hwdev->pf_to_mgmt->event_state == SSS_ADM_EVENT_START) { + complete(&recv_msg->done); + hwdev->pf_to_mgmt->event_state = SSS_ADM_EVENT_TIMEOUT; + } + spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h new file mode 100644 index 00000000000000..c2c3092fbdc00d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_INIT_H +#define SSS_HWIF_ADM_INIT_H + +#include "sss_hwdev.h" + +int sss_hwif_init_adm(struct sss_hwdev *hwdev); +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev); +void sss_complete_adm_event(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c new file mode 100644 index 00000000000000..93bda1133420cf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hw_export.h" +#include "sss_hwif_aeq.h" +#include 
"sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_csr.h" + +#define SSS_DEF_AEQ_DEPTH 0x10000 + +#define SSS_MIN_AEQ_DEPTH 64 +#define SSS_MAX_AEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_AEQE_SIZE) * SSS_AEQ_MAX_PAGE) + +#define SSS_AEQE_DESC_SIZE 4 +#define SSS_AEQE_DATA_SIZE (SSS_AEQE_SIZE - SSS_AEQE_DESC_SIZE) + +struct sss_aeq_elem { + u8 aeqe_data[SSS_AEQE_DATA_SIZE]; + u32 desc; +}; + +#define SSS_GET_AEQ_ELEM(aeq, id) \ + ((struct sss_aeq_elem *)SSS_GET_EQ_ELEM((aeq), (id))) + +#define SSS_GET_CUR_AEQ_ELEM(aeq) SSS_GET_AEQ_ELEM((aeq), (aeq)->ci) + +#define SSS_GET_AEQ_SW_EVENT(type) \ + (((type) >= SSS_ERR_MAX) ? \ + SSS_STF_EVENT : SSS_STL_EVENT) + +#define SSS_AEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_SHIFT 20 +#define SSS_AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_AEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_MASK 0x7U +#define SSS_AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_AEQ_CTRL_0(val, member) \ + (((val) & SSS_AEQ_CTRL_0_##member##_MASK) << \ + SSS_AEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_0(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_0_##member##_MASK << \ + SSS_AEQ_CTRL_0_##member##_SHIFT))) + +#define SSS_AEQ_CTRL_1_SIZE_SHIFT 0 +#define SSS_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define SSS_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define SSS_AEQ_CTRL_1_SIZE_MASK 0x1FFFFFU +#define SSS_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define SSS_AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define SSS_SET_AEQ_CTRL_1(val, member) \ + (((val) & SSS_AEQ_CTRL_1_##member##_MASK) << \ + SSS_AEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_1(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_1_##member##_MASK << \ + SSS_AEQ_CTRL_1_##member##_SHIFT))) + +#define SSS_ELEM_SIZE_IN_32B(aeq) (((aeq)->entry_size) >> 5) +#define SSS_SET_EQ_HW_E_SIZE(aeq) ((u32)ilog2(SSS_ELEM_SIZE_IN_32B(aeq))) + +#define SSS_AEQ_WQ_NAME "sss_eqs" + +#define SSS_AEQ_NAME "sss_aeq" + +#define SSS_AEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_aeq_info, aeq[0]) + +#define SSS_AEQ_DMA_ATTR_DEF 0 + +enum sss_aeq_cb_state { + SSS_AEQ_HW_CB_REG = 0, + SSS_AEQ_HW_CB_RUNNING, + SSS_AEQ_SW_CB_REG, + SSS_AEQ_SW_CB_RUNNING, +}; + +static u32 aeq_depth = SSS_DEF_AEQ_DEPTH; +module_param(aeq_depth, uint, 0444); +MODULE_PARM_DESC(aeq_depth, + "aeq depth, valid range is " __stringify(SSS_MIN_AEQ_DEPTH) + " - " __stringify(SSS_MAX_AEQ_DEPTH)); + +static void sss_chip_set_aeq_intr(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = sss_chip_read_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR); + + val = SSS_CLEAR_AEQ_CTRL_0(val, INTR_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, DMA_ATTR) & + SSS_CLEAR_AEQ_CTRL_0(val, PCI_INTF_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, INTR_MODE); + + val |= SSS_SET_AEQ_CTRL_0(SSS_EQ_IRQ_ID(aeq), INTR_ID) | + SSS_SET_AEQ_CTRL_0(SSS_AEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_AEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_AEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR, val); +} + +static void sss_chip_set_aeq_size(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = SSS_SET_AEQ_CTRL_1(aeq->len, SIZE) | + SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_E_SIZE(aeq), ELEM_SIZE) | + SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_PAGE_SIZE(aeq), PAGE_SIZE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, val); +} 
+ +static u32 sss_chip_init_aeq_attr(void *aeq) +{ + sss_chip_set_aeq_intr(aeq); + sss_chip_set_aeq_size(aeq); + + return 0; +} + +static void sss_init_aeqe_desc(void *data) +{ + u32 i; + u32 init_val; + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(aeq)); + for (i = 0; i < aeq->len; i++) { + aeqe = SSS_GET_AEQ_ELEM(aeq, i); + aeqe->desc = init_val; + } + + /* write all aeq desc */ + wmb(); +} + +static irqreturn_t sss_aeq_intr_handle(int irq, void *data) +{ + struct sss_eq *aeq = (struct sss_eq *)data; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + sss_chip_clear_msix_resend_bit(aeq->hwdev, SSS_EQ_IRQ_ID(aeq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); + + return IRQ_HANDLED; +} + +static void sss_aeq_event_handle(struct sss_eq *aeq, u32 desc) +{ + u32 size; + u32 event; + u8 data[SSS_AEQE_DATA_SIZE]; + enum sss_aeq_hw_event hw_event; + enum sss_aeq_sw_event sw_event; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + struct sss_aeq_elem *aeqe; + + aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + hw_event = SSS_GET_EQE_DESC(desc, TYPE); + SSS_TO_HWDEV(aeq)->aeq_stat.cur_recv_cnt++; + + if (SSS_GET_EQE_DESC(desc, SRC)) { + event = hw_event; + sw_event = SSS_GET_AEQ_SW_EVENT(event); + + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + set_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + if (aeq_info->sw_event_handler[sw_event] && + test_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[sw_event])) + aeq_info->sw_event_handler[sw_event](aeq_info->sw_event_data[sw_event], + hw_event, data); + + clear_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + return; + } + + if (hw_event < SSS_AEQ_EVENT_MAX) { + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + + size = SSS_GET_EQE_DESC(desc, SIZE); + set_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + if (aeq_info->hw_event_handler[hw_event] && + test_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[hw_event])) + aeq_info->hw_event_handler[hw_event](aeq_info->hw_event_data[hw_event], + data, size); + + clear_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + return; + } + sdk_warn(SSS_TO_HWDEV(aeq)->dev_hdl, "Unknown aeq event %d\n", hw_event); +} + +static bool sss_aeq_irq_handle(struct sss_eq *aeq) +{ + struct sss_aeq_elem *elem = NULL; + u32 desc; + u32 i; + u32 eqe_cnt = 0; + + for (i = 0; i < SSS_TASK_PROCESS_EQE_LIMIT; i++) { + elem = SSS_GET_CUR_AEQ_ELEM(aeq); + + /* Data in HW is in Big endian Format */ + desc = be32_to_cpu(elem->desc); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(desc, WRAPPED) == aeq->wrap) + return false; + + dma_rmb(); + + sss_aeq_event_handle(aeq, desc); + + sss_increase_eq_ci(aeq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(aeq, SSS_EQ_NOT_ARMED); + } + } + + return true; +} + +static void sss_aeq_irq_work(struct work_struct *work) +{ + bool unfinish; + struct sss_eq *aeq = container_of(work, struct sss_eq, aeq_work); + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + unfinish = sss_aeq_irq_handle(aeq); + sss_chip_set_eq_ci(aeq, SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); +} + +static void 
sss_init_aeq_para(struct sss_eq *aeq, u16 qid) +{ + aeq->init_desc_handler = sss_init_aeqe_desc; + aeq->init_attr_handler = sss_chip_init_aeq_attr; + aeq->irq_handler = sss_aeq_intr_handle; + aeq->name = SSS_AEQ_NAME; + INIT_WORK(&aeq->aeq_work, sss_aeq_irq_work); + + aeq->qid = qid; + aeq->len = aeq_depth; + aeq->type = SSS_AEQ; + aeq->entry_size = SSS_AEQE_SIZE; +} + +static int sss_init_aeq(struct sss_hwdev *hwdev, + u16 aeq_num, struct sss_irq_desc *irq) +{ + u16 i; + u16 qid; + int ret; + struct sss_aeq_info *aeq_info = NULL; + + aeq_info = kzalloc(sizeof(*aeq_info), GFP_KERNEL); + if (!aeq_info) + return -ENOMEM; + + hwdev->aeq_info = aeq_info; + aeq_info->hwdev = hwdev; + aeq_info->num = aeq_num; + + aeq_info->workq = alloc_workqueue(SSS_AEQ_WQ_NAME, WQ_MEM_RECLAIM, SSS_MAX_AEQ); + if (!aeq_info->workq) { + ret = -ENOMEM; + sdk_err(hwdev->dev_hdl, "Fail to alloc aeq workqueue\n"); + goto alloc_workq_err; + } + + if (aeq_depth < SSS_MIN_AEQ_DEPTH || aeq_depth > SSS_MAX_AEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, "Invalid aeq_depth value %u, adjust to %d\n", + aeq_depth, SSS_DEF_AEQ_DEPTH); + aeq_depth = SSS_DEF_AEQ_DEPTH; + } + + for (qid = 0; qid < aeq_num; qid++) { + sss_init_aeq_para(&aeq_info->aeq[qid], qid); + ret = sss_init_eq(hwdev, &aeq_info->aeq[qid], &irq[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq %u\n", qid); + goto init_aeq_err; + } + } + + for (qid = 0; qid < aeq_num; qid++) + sss_chip_set_msix_state(hwdev, irq[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_aeq_err: + for (i = 0; i < qid; i++) + sss_deinit_eq(&aeq_info->aeq[i]); + + destroy_workqueue(aeq_info->workq); + +alloc_workq_err: + kfree(aeq_info); + hwdev->aeq_info = NULL; + + return ret; +} + +void sss_deinit_aeq(struct sss_hwdev *hwdev) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + enum sss_aeq_hw_event aeq_event; + enum sss_aeq_sw_event sw_aeq_event; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) + sss_deinit_eq(&aeq_info->aeq[qid]); + + for (sw_aeq_event = SSS_STL_EVENT; + sw_aeq_event < SSS_AEQ_SW_EVENT_MAX; sw_aeq_event++) + sss_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (aeq_event = SSS_HW_FROM_INT; + aeq_event < SSS_AEQ_EVENT_MAX; aeq_event++) + sss_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeq_info->workq); + + kfree(aeq_info); + hwdev->aeq_info = NULL; +} + +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) { + irq_array[qid].irq_id = aeq_info->aeq[qid].irq_desc.irq_id; + irq_array[qid].msix_id = + aeq_info->aeq[qid].irq_desc.msix_id; + } + + *irq_num = aeq_info->num; +} + +void sss_dump_aeq_info(struct sss_hwdev *hwdev) +{ + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = NULL; + u32 addr; + u32 ci; + u32 pi; + u32 ctrl0; + u32 id; + int qid; + + for (qid = 0; qid < hwdev->aeq_info->num; qid++) { + aeq = &hwdev->aeq_info->aeq[qid]; + /* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(aeq)->hwif, + SSS_EQ_INDIR_ID_ADDR(aeq->type), aeq->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_CSR_AEQ_CTRL_0_ADDR; + ctrl0 = sss_chip_read_reg(hwdev->hwif, addr); + id = sss_chip_read_reg(hwdev->hwif, SSS_EQ_INDIR_ID_ADDR(aeq->type)); + + addr = SSS_EQ_CI_REG_ADDR(aeq); + ci = sss_chip_read_reg(hwdev->hwif, addr); + addr = SSS_EQ_PI_REG_ADDR(aeq); + pi = sss_chip_read_reg(hwdev->hwif, addr); + aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + 
sdk_err(hwdev->dev_hdl, + "Aeq id: %d, id: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n", + qid, id, ctrl0, ci, pi, work_busy(&aeq->aeq_work), + aeq->wrap, be32_to_cpu(aeqe->desc), aeq->ci); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !event_handler || event >= SSS_AEQ_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->hw_event_handler[event] = event_handler; + aeq_info->hw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + while (test_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->hw_event_handler[event] = NULL; +} + +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !sw_event_handler || event >= SSS_AEQ_SW_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->sw_event_handler[event] = sw_event_handler; + aeq_info->sw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_SW_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]); + while (test_bit(SSS_AEQ_SW_CB_RUNNING, + &aeq_info->sw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->sw_event_handler[event] = NULL; +} + +int sss_hwif_init_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 aeq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + aeq_num = SSS_GET_HWIF_AEQ_NUM(hwdev->hwif); + if (aeq_num > SSS_MAX_AEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %d\n", SSS_MAX_AEQ); + aeq_num = SSS_MAX_AEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array, aeq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, aeq_num: %u\n", aeq_num); + return -ENOMEM; + } + + if (act_num < aeq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %u\n", act_num); + aeq_num = act_num; + } + + ret = sss_init_aeq(hwdev, aeq_num, irq_array); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq\n"); + goto init_aeqs_err; + } + + return 0; + +init_aeqs_err: + for (i = 0; i < aeq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); + + return ret; +} + +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + sss_get_aeq_irq(hwdev, irq_array, &irq_num); + + sss_deinit_aeq(hwdev); + + for (i = 0; i < irq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); +} + +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev) +{ 
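+	/*
+	 * Walk the AEQs from the highest index down to 0 and program each
+	 * queue's MSI-X coalescing attributes over the management channel.
+	 */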
+ int i; + int ret; + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = aeq_info->num - 1; i >= 0; i--) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&aeq_info->aeq[i]); + ret = sss_chip_set_eq_msix_attr(hwdev, &intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for aeq %d\n", i); + return -EFAULT; + } + } + + return 0; +} + +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + if (!hwdev) + return 0; + + sdk_err(hwdev->dev_hdl, "Received ucode aeq event, type: 0x%x, data: 0x%llx\n", + aeq_event, *((u64 *)data)); + + if (aeq_event < SSS_ERR_MAX) + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[aeq_event]); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h new file mode 100644 index 00000000000000..105c8e98572319 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_AEQ_H +#define SSS_HWIF_AEQ_H + +#include "sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hwdev.h" +#include "sss_aeq_info.h" + +void sss_deinit_aeq(struct sss_hwdev *hwdev); +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num); +void sss_dump_aeq_info(struct sss_hwdev *hwdev); +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler); +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event); +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler); +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event); +int sss_hwif_init_aeq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev); +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev); +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c new file mode 100644 index 00000000000000..1c7c907dea3136 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_GET_REG_FLAG(reg) ((reg) & (~(SSS_CSR_FLAG_MASK))) +#define SSS_GET_REG_ADDR(reg) ((reg) & (SSS_CSR_FLAG_MASK)) + +#define SSS_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +#define SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val) ((val) & (~(1U << (host_id)))) +#define SSS_SET_SLAVE_HOST_STATUS(host_id, enable) (((u8)(enable) & 1U) << (host_id)) + +#define SSS_MULT_HOST_SLAVE_STATUS_ADDR (SSS_MGMT_FLAG + 0xDF30) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg) +{ + if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG) + return be32_to_cpu(readl(hwif->mgmt_reg_base + + SSS_GET_REG_ADDR(reg))); + else + return be32_to_cpu(readl(hwif->cfg_reg_base + + SSS_GET_REG_ADDR(reg))); +} + +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 
val) +{ + if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_reg_base + SSS_GET_REG_ADDR(reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_reg_base + SSS_GET_REG_ADDR(reg)); +} + +bool sss_chip_get_present_state(void *hwdev) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) { + sdk_warn(SSS_TO_DEV(hwdev), "Card is not present\n"); + return false; + } + + return true; +} + +u32 sss_chip_get_pcie_link_status(void *hwdev) +{ + u32 val; + + if (!hwdev) + return SSS_PCIE_LINK_DOWN; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) + return val; + + return !SSS_GET_AF1(val, MGMT_INIT_STATUS); +} + +void sss_chip_set_pf_status(struct sss_hwif *hwif, + enum sss_pf_status status) +{ + u32 val; + + if (SSS_GET_HWIF_FUNC_TYPE(hwif) == SSS_FUNC_TYPE_VF) + return; + + val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + val = SSS_CLEAR_AF6(val, PF_STATUS); + val |= SSS_SET_AF6(status, PF_STATUS); + + sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR6_ADDR, val); +} + +enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif) +{ + u32 val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + return SSS_GET_AF6(val, PF_STATUS); +} + +void sss_chip_enable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_ENABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_chip_disable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_DISABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_free_db_id(struct sss_hwif *hwif, u32 id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + + if (id >= pool->bit_size) + return; + + spin_lock(&pool->id_lock); + clear_bit((int)id, pool->bitmap); + spin_unlock(&pool->id_lock); +} + +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 pg_id; + + spin_lock(&pool->id_lock); + pg_id = (u32)find_first_zero_bit(pool->bitmap, pool->bit_size); + if (pg_id == pool->bit_size) { + spin_unlock(&pool->id_lock); + return -ENOMEM; + } + set_bit(pg_id, pool->bitmap); + spin_unlock(&pool->id_lock); + + *id = pg_id; + + return 0; +} + +void sss_dump_chip_err_info(struct sss_hwdev *hwdev) +{ + u32 value; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return; + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_BASE_INFO_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_MGMT_HEALTH_STATUS_ADDR); + sdk_warn(hwdev->dev_hdl, "Mgmt CPU health status: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO2_ADDR); + 
sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value); +} + +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id) +{ + u32 addr; + u32 val; + + if (!hwdev) + return 0; + + addr = SSS_CSR_FUNC_PPF_ELECT(host_id); + val = sss_chip_read_reg(hwdev->hwif, addr); + + return SSS_GET_PPF_ELECT_PORT(val, ID); +} + +static void sss_init_eq_msix_cfg(void *hwdev, + struct sss_cmd_msix_config *cmd_msix, + struct sss_irq_cfg *info) +{ + cmd_msix->opcode = SSS_MGMT_MSG_SET_CMD; + cmd_msix->func_id = sss_get_global_func_id(hwdev); + cmd_msix->msix_index = (u16)info->msix_id; + cmd_msix->lli_credit_cnt = info->lli_credit; + cmd_msix->lli_timer_cnt = info->lli_timer; + cmd_msix->pending_cnt = info->pending; + cmd_msix->coalesce_timer_cnt = info->coalesc_timer; + cmd_msix->resend_timer_cnt = info->resend_timer; +} + +int sss_chip_set_eq_msix_attr(void *hwdev, + struct sss_irq_cfg *intr_info, u16 ch) +{ + int ret; + struct sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + sss_init_eq_msix_cfg(hwdev, &cmd_msix, intr_info); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, ch); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set eq msix cfg, ret: %d, status: 0x%x, out_len: 0x%x, ch: 0x%x\n", + ret, cmd_msix.head.state, out_len, ch); + return -EINVAL; + } + + return 0; +} + +int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size) +{ + int ret; + struct sss_cmd_wq_page_size cmd_page = {0}; + u16 out_len = sizeof(cmd_page); + + cmd_page.opcode = SSS_MGMT_MSG_SET_CMD; + cmd_page.func_id = func_id; + cmd_page.page_size = SSS_PAGE_SIZE_HW(page_size); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + &cmd_page, sizeof(cmd_page), &cmd_page, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_page)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set wq page size, ret: %d, status: 0x%x, out_len: 0x%0x\n", + ret, cmd_page.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1) +{ + int ret; + struct sss_cmd_ceq_ctrl_reg cmd_ceq = {0}; + u16 out_len = sizeof(cmd_ceq); + + cmd_ceq.func_id = sss_get_global_func_id(hwdev); + cmd_ceq.qid = qid; + cmd_ceq.ctrl0 = attr0; + cmd_ceq.ctrl1 = attr1; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &cmd_ceq, sizeof(cmd_ceq), &cmd_ceq, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ceq)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ceq %u ctrl, ret: %d status: 0x%x, out_len: 0x%x\n", + qid, ret, cmd_ceq.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +void sss_chip_set_slave_host_status(void *dev, u8 host_id, bool enable) +{ + u32 val; + struct sss_hwdev *hwdev = dev; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + val = sss_chip_read_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR); + val = SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val); + val |= SSS_SET_SLAVE_HOST_STATUS(host_id, !!enable); + + sss_chip_write_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR, val); + + sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, val); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h new file mode 100644 index 00000000000000..f299bf0fa6d906 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_API_H +#define SSS_HWIF_API_H + +#include "sss_hwdev.h" + +enum sss_pf_status { + SSS_PF_STATUS_INIT = 0X0, + SSS_PF_STATUS_ACTIVE_FLAG = 0x11, + SSS_PF_STATUS_FLR_START_FLAG = 0x12, + SSS_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +enum sss_doorbell_ctrl { + DB_ENABLE, + DB_DISABLE, +}; + +enum sss_outbound_ctrl { + OUTBOUND_ENABLE, + OUTBOUND_DISABLE, +}; + +#define SSS_PCIE_LINK_DOWN 0xFFFFFFFF +#define SSS_PCIE_LINK_UP 0 + +#define SSS_AF1_PPF_ID_SHIFT 0 +#define SSS_AF1_AEQ_PER_FUNC_SHIFT 8 +#define SSS_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define SSS_AF1_PF_INIT_STATUS_SHIFT 31 + +#define SSS_AF1_PPF_ID_MASK 0x3F +#define SSS_AF1_AEQ_PER_FUNC_MASK 0x3 +#define SSS_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define SSS_AF1_PF_INIT_STATUS_MASK 0x1 + +#define SSS_GET_AF1(val, member) \ + (((val) >> SSS_AF1_##member##_SHIFT) & SSS_AF1_##member##_MASK) + +#define SSS_AF4_DOORBELL_CTRL_SHIFT 0 +#define SSS_AF4_DOORBELL_CTRL_MASK 0x1 + +#define SSS_GET_AF4(val, member) \ + (((val) >> SSS_AF4_##member##_SHIFT) & SSS_AF4_##member##_MASK) + +#define SSS_SET_AF4(val, member) \ + (((val) & SSS_AF4_##member##_MASK) << SSS_AF4_##member##_SHIFT) + +#define SSS_CLEAR_AF4(val, member) \ + ((val) & (~(SSS_AF4_##member##_MASK << SSS_AF4_##member##_SHIFT))) + +#define SSS_AF6_PF_STATUS_SHIFT 0 +#define SSS_AF6_PF_STATUS_MASK 0xFFFF + +#define SSS_AF6_FUNC_MAX_SQ_SHIFT 23 +#define SSS_AF6_FUNC_MAX_SQ_MASK 0x1FF + +#define SSS_AF6_MSIX_FLEX_EN_SHIFT 22 +#define SSS_AF6_MSIX_FLEX_EN_MASK 0x1 + +#define SSS_SET_AF6(val, member) \ + ((((u32)(val)) & SSS_AF6_##member##_MASK) << \ + SSS_AF6_##member##_SHIFT) + +#define SSS_GET_AF6(val, member) \ + (((u32)(val) >> SSS_AF6_##member##_SHIFT) & SSS_AF6_##member##_MASK) + +#define SSS_CLEAR_AF6(val, member) \ + ((u32)(val) & (~(SSS_AF6_##member##_MASK << \ + SSS_AF6_##member##_SHIFT))) + +#define SSS_PPF_ELECT_PORT_ID_SHIFT 0 + +#define SSS_PPF_ELECT_PORT_ID_MASK 0x3F + +#define SSS_GET_PPF_ELECT_PORT(val, member) \ + (((val) >> SSS_PPF_ELECT_PORT_##member##_SHIFT) & \ + SSS_PPF_ELECT_PORT_##member##_MASK) + +#define SSS_PPF_ELECTION_ID_SHIFT 0 + +#define SSS_PPF_ELECTION_ID_MASK 0x3F + +#define SSS_SET_PPF(val, member) \ + (((val) & SSS_PPF_ELECTION_##member##_MASK) << \ + SSS_PPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_PPF(val, member) \ + (((val) >> SSS_PPF_ELECTION_##member##_SHIFT) & \ + SSS_PPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_PPF(val, member) \ + ((val) & (~(SSS_PPF_ELECTION_##member##_MASK << \ + SSS_PPF_ELECTION_##member##_SHIFT))) + +#define SSS_DB_DWQE_SIZE 0x00400000 + +/* db/dwqe page size: 4K */ +#define SSS_DB_PAGE_SIZE 0x00001000ULL +#define SSS_DWQE_OFFSET 0x00000800ULL + +#define SSS_DB_MAX_AREAS (SSS_DB_DWQE_SIZE / SSS_DB_PAGE_SIZE) + +#define SSS_DB_ID(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / SSS_DB_PAGE_SIZE)) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg); +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val); +bool sss_chip_get_present_state(void *hwdev); +u32 sss_chip_get_pcie_link_status(void *hwdev); +void sss_chip_set_pf_status(struct sss_hwif *hwif, enum sss_pf_status status); +enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif); +void sss_chip_enable_doorbell(struct sss_hwif *hwif); +void sss_chip_disable_doorbell(struct sss_hwif *hwif); +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id); +void 
sss_free_db_id(struct sss_hwif *hwif, u32 id); +void sss_dump_chip_err_info(struct sss_hwdev *hwdev); +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id); +int sss_chip_set_eq_msix_attr(void *hwdev, struct sss_irq_cfg *info, u16 channel); +int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size); +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1); +void sss_chip_set_slave_host_status(void *hwdev, u8 host_id, bool enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c new file mode 100644 index 00000000000000..b3ebdfb07cc232 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_export.h" +#include "sss_hwif_ceq.h" +#include "sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_DEF_CEQ_DEPTH 8192 + +#define SSS_CEQ_NAME "sss_ceq" + +#define SSS_CEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_SHIFT 24 +#define SSS_CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define SSS_CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_CEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_MASK 0x3U +#define SSS_CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define SSS_CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_CEQ_CTRL_0(val, member) \ + (((val) & SSS_CEQ_CTRL_0_##member##_MASK) << \ + SSS_CEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CEQ_CTRL_1_LEN_SHIFT 0 +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define SSS_CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define SSS_SET_CEQ_CTRL_1(val, member) \ + (((val) & SSS_CEQ_CTRL_1_##member##_MASK) << \ + SSS_CEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CEQ_DMA_ATTR_DEF 0 + +#define SSS_MIN_CEQ_DEPTH 64 +#define SSS_MAX_CEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_CEQE_SIZE) * SSS_CEQ_MAX_PAGE) + +#define SSS_GET_CEQ_ELEM(ceq, id) ((u32 *)SSS_GET_EQ_ELEM((ceq), (id))) + +#define SSS_GET_CUR_CEQ_ELEM(ceq) SSS_GET_CEQ_ELEM((ceq), (ceq)->ci) + +#define SSS_CEQE_TYPE_SHIFT 23 +#define SSS_CEQE_TYPE_MASK 0x7 + +#define SSS_CEQE_TYPE(type) \ + (((type) >> SSS_CEQE_TYPE_SHIFT) & SSS_CEQE_TYPE_MASK) + +#define SSS_CEQE_DATA_MASK 0x3FFFFFF +#define SSS_CEQE_DATA(data) ((data) & SSS_CEQE_DATA_MASK) + +#define SSS_CEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_ceq_info, ceq[0]) + +#define CEQ_LMT_KICK_DEF 0 + +enum sss_ceq_cb_state { + SSS_CEQ_CB_REG = 0, + SSS_CEQ_CB_RUNNING, +}; + +static u32 ceq_depth = SSS_DEF_CEQ_DEPTH; +module_param(ceq_depth, uint, 0444); +MODULE_PARM_DESC(ceq_depth, + "ceq depth, valid range is " __stringify(SSS_MIN_CEQ_DEPTH) + " - " __stringify(SSS_MAX_CEQ_DEPTH)); + +static u32 tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; +module_param(tasklet_depth, uint, 0444); +MODULE_PARM_DESC(tasklet_depth, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +static void sss_init_ceqe_desc(void *data) +{ + u32 i; + u32 init_val; + u32 
*ceqe = NULL; + struct sss_eq *ceq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(ceq)); + for (i = 0; i < ceq->len; i++) { + ceqe = SSS_GET_CEQ_ELEM(ceq, i); + *(ceqe) = init_val; + } + + /* write all ceq desc */ + wmb(); +} + +static u32 sss_chip_init_ceq_attr(void *data) +{ + u32 val; + u32 len; + struct sss_eq *ceq = (struct sss_eq *)data; + struct sss_hwif *hwif = SSS_TO_HWDEV(ceq)->hwif; + + val = SSS_SET_CEQ_CTRL_0(SSS_EQ_IRQ_ID(ceq), INTR_ID) | + SSS_SET_CEQ_CTRL_0(SSS_CEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_CEQ_CTRL_0(CEQ_LMT_KICK_DEF, LIMIT_KICK) | + SSS_SET_CEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_CEQ_CTRL_0(SSS_SET_EQ_HW_PAGE_SIZE(ceq), PAGE_SIZE) | + SSS_SET_CEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + len = SSS_SET_CEQ_CTRL_1(ceq->len, LEN); + + return sss_chip_set_ceq_attr(SSS_TO_HWDEV(ceq), ceq->qid, val, len); +} + +static irqreturn_t sss_ceq_intr_handle(int irq, void *data) +{ + struct sss_eq *ceq = (struct sss_eq *)data; + + ceq->hw_intr_jiffies = jiffies; + + sss_chip_clear_msix_resend_bit(ceq->hwdev, SSS_EQ_IRQ_ID(ceq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static void sss_ceqe_handler(struct sss_eq *ceq, u32 ceqe) +{ + u32 ceqe_data = SSS_CEQE_DATA(ceqe); + enum sss_ceq_event ceq_event = SSS_CEQE_TYPE(ceqe); + struct sss_ceq_info *ceq_info = SSS_CEQ_TO_INFO(ceq); + + if (ceq_event >= SSS_CEQ_EVENT_MAX) { + sdk_err(SSS_TO_HWDEV(ceq)->dev_hdl, "Unknown ceq_event:%d, ceqe_data: 0x%x\n", + ceq_event, ceqe_data); + return; + } + + set_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); + + if (ceq_info->event_handler[ceq_event] && + test_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event])) + ceq_info->event_handler[ceq_event](ceq_info->event_handler_data[ceq_event], + ceqe_data); + + clear_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); +} + +static bool sss_ceq_irq_handle(struct sss_eq *ceq) +{ + u32 elem; + u32 eqe_cnt = 0; + u32 i; + + for (i = 0; i < tasklet_depth; i++) { + elem = *(SSS_GET_CUR_CEQ_ELEM(ceq)); + elem = be32_to_cpu(elem); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(elem, WRAPPED) == ceq->wrap) + return false; + + sss_ceqe_handler(ceq, elem); + + sss_increase_eq_ci(ceq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(ceq, SSS_EQ_NOT_ARMED); + } + } + + return true; +} + +static void sss_ceq_tasklet(ulong ceq_data) +{ + bool unfinish; + struct sss_eq *ceq = (struct sss_eq *)ceq_data; + + ceq->sw_intr_jiffies = jiffies; + unfinish = sss_ceq_irq_handle(ceq); + sss_chip_set_eq_ci(ceq, SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + tasklet_schedule(&ceq->ceq_tasklet); +} + +static void sss_init_ceq_para(struct sss_eq *ceq, u16 qid) +{ + ceq->init_desc_handler = sss_init_ceqe_desc; + ceq->init_attr_handler = sss_chip_init_ceq_attr; + ceq->irq_handler = sss_ceq_intr_handle; + ceq->name = SSS_CEQ_NAME; + tasklet_init(&ceq->ceq_tasklet, sss_ceq_tasklet, (ulong)ceq); + + ceq->qid = qid; + ceq->len = ceq_depth; + ceq->type = SSS_CEQ; + ceq->entry_size = SSS_CEQE_SIZE; +} + +static int sss_init_ceq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 irq_num) +{ + u16 i; + u16 qid; + int ret; + struct sss_ceq_info *ceq_info = NULL; + + ceq_info = kzalloc(sizeof(*ceq_info), GFP_KERNEL); + if (!ceq_info) + return -ENOMEM; + + ceq_info->hwdev = hwdev; + ceq_info->num = irq_num; + hwdev->ceq_info = ceq_info; + + if 
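The consumption loop in sss_ceq_irq_handle() relies on a wrap (phase) bit: an element whose wrap bit still matches the driver's current wrap value has not been written by hardware in this pass, so the loop stops there. A simplified software model of that ownership test follows; it sketches the general phase-bit technique, not the driver's actual element layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_LEN  8
#define PHASE_BIT 0x80000000U

/* Simplified phase-bit ring: the producer writes entries whose phase bit is
 * the opposite of what the consumer currently treats as "unwritten"; the
 * consumer stops as soon as an entry's phase matches its own phase flag and
 * flips that flag each time the index wraps.
 */
struct demo_ring {
	uint32_t entry[RING_LEN];
	uint32_t ci;     /* consumer index */
	uint32_t phase;  /* consumer's current wrap/phase flag (0 or 1) */
};

static bool consume_one(struct demo_ring *r, uint32_t *out)
{
	uint32_t e = r->entry[r->ci];
	uint32_t entry_phase = (e & PHASE_BIT) ? 1U : 0U;

	if (entry_phase == r->phase)   /* not written in this pass: stop */
		return false;

	*out = e & ~PHASE_BIT;
	if (++r->ci == RING_LEN) {     /* wrap: flip the expected phase */
		r->ci = 0;
		r->phase ^= 1U;
	}
	return true;
}

int main(void)
{
	struct demo_ring r = { .entry = { PHASE_BIT | 42 } };
	uint32_t v;

	if (consume_one(&r, &v))
		printf("consumed %u\n", v);
	return 0;
}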
(tasklet_depth == 0) { + sdk_warn(hwdev->dev_hdl, + "Invalid tasklet_depth can not be zero, adjust to %d\n", + SSS_TASK_PROCESS_EQE_LIMIT); + tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; + } + + if (ceq_depth < SSS_MIN_CEQ_DEPTH || ceq_depth > SSS_MAX_CEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, + "Invalid ceq_depth %u out of range, adjust to %d\n", + ceq_depth, SSS_DEF_CEQ_DEPTH); + ceq_depth = SSS_DEF_CEQ_DEPTH; + } + + for (qid = 0; qid < irq_num; qid++) { + sss_init_ceq_para(&ceq_info->ceq[qid], qid); + ret = sss_init_eq(hwdev, &ceq_info->ceq[qid], &irq_array[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq %u\n", qid); + goto init_ceq_err; + } + } + + for (qid = 0; qid < irq_num; qid++) + sss_chip_set_msix_state(hwdev, irq_array[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_ceq_err: + for (i = 0; i < qid; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + kfree(ceq_info); + hwdev->ceq_info = NULL; + + return ret; +} + +static void sss_get_ceq_irq(struct sss_hwdev *hwdev, struct sss_irq_desc *irq, + u16 *irq_num) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + + for (i = 0; i < ceq_info->num; i++) { + irq[i].msix_id = ceq_info->ceq[i].irq_desc.msix_id; + irq[i].irq_id = ceq_info->ceq[i].irq_desc.irq_id; + } + + *irq_num = ceq_info->num; +} + +int sss_hwif_init_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 ceq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_desc[SSS_MAX_CEQ] = {0}; + + ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + if (ceq_num > SSS_MAX_CEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", SSS_MAX_CEQ); + ceq_num = SSS_MAX_CEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc, ceq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, ceq_num: %u\n", ceq_num); + return -EINVAL; + } + + if (act_num < ceq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", act_num); + ceq_num = act_num; + } + + ret = sss_init_ceq(hwdev, irq_desc, ceq_num); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq, ret:%d\n", ret); + goto init_ceq_err; + } + + return 0; + +init_ceq_err: + for (i = 0; i < act_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc[i].irq_id); + + return ret; +} + +static void sss_deinit_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + enum sss_ceq_event event; + + for (i = 0; i < ceq_info->num; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + for (event = SSS_NIC_CTRLQ; event < SSS_CEQ_EVENT_MAX; event++) + sss_ceq_unregister_cb(hwdev, event); + + kfree(ceq_info); + hwdev->ceq_info = NULL; +} + +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev) +{ + int i; + u16 irq_num = 0; + struct sss_irq_desc irq[SSS_MAX_CEQ] = {0}; + + sss_get_ceq_irq(hwdev, irq, &irq_num); + + sss_deinit_ceq(hwdev); + + for (i = 0; i < irq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq[i].irq_id); +} + +void sss_dump_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq *ceq_info = NULL; + u32 addr; + u32 ci; + u32 pi; + int qid; + + for (qid = 0; qid < hwdev->ceq_info->num; qid++) { + ceq_info = &hwdev->ceq_info->ceq[qid]; + /* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(ceq_info)->hwif, + SSS_EQ_INDIR_ID_ADDR(ceq_info->type), ceq_info->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_EQ_CI_REG_ADDR(ceq_info); + ci = sss_chip_read_reg(hwdev->hwif, addr); + addr = SSS_EQ_PI_REG_ADDR(ceq_info); + pi = sss_chip_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, + "Ceq id: %d, 
ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", + qid, ci, ceq_info->ci, pi, tasklet_state(&ceq_info->ceq_tasklet), + ceq_info->wrap, be32_to_cpu(*(SSS_GET_CUR_CEQ_ELEM(ceq_info)))); + + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->hw_intr_jiffies)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->sw_intr_jiffies)); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return -EINVAL; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + ceq_info->event_handler_data[ceq_event] = data; + ceq_info->event_handler[ceq_event] = event_handler; + set_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + + return 0; +} + +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + clear_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + while (test_bit(SSS_CEQ_CB_RUNNING, + &ceq_info->event_handler_state[ceq_event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + ceq_info->event_handler[ceq_event] = NULL; +} + +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev) +{ + u16 i; + int ret; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = 0; i < ceq_info->num; i++) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&ceq_info->ceq[i]); + ret = sss_chip_set_msix_attr(hwdev, intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for ceq %u\n", i); + return -EFAULT; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h new file mode 100644 index 00000000000000..29e65016b11701 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CEQ_H +#define SSS_HWIF_CEQ_H + +#include "sss_hw_ceq.h" +#include "sss_ceq_info.h" +#include "sss_hwdev.h" + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler); +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event); +int sss_hwif_init_ceq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev); +void sss_dump_ceq_info(struct sss_hwdev *hwdev); +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c new file mode 100644 index 00000000000000..43386b7984b9da --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c @@ -0,0 +1,928 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" 
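The CEQ callback code above keeps two per-event state bits: SSS_CEQ_CB_REG marks a registered handler and SSS_CEQ_CB_RUNNING marks a handler currently executing, so unregistration can clear REG and then wait for RUNNING to drop before the handler pointer is released. A minimal userspace sketch of that handshake, assuming a single event slot and a busy-wait in place of usleep_range():

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*event_handler_t)(void *data, unsigned int event_data);

/* One event slot: REG marks a registered handler, RUNNING marks a handler
 * currently executing. Unregister clears REG first, then waits for RUNNING
 * to drop before the handler pointer may be released.
 */
static _Atomic int reg_flag;
static _Atomic int running_flag;
static event_handler_t handler;
static void *handler_data;

static void demo_register(event_handler_t cb, void *data)
{
	handler_data = data;
	handler = cb;
	atomic_store(&reg_flag, 1);
}

static void demo_dispatch(unsigned int event_data)
{
	atomic_store(&running_flag, 1);
	if (handler && atomic_load(&reg_flag))
		handler(handler_data, event_data);
	atomic_store(&running_flag, 0);
}

static void demo_unregister(void)
{
	atomic_store(&reg_flag, 0);
	while (atomic_load(&running_flag))
		;  /* the driver sleeps with usleep_range() instead */
	handler = NULL;
}

static void print_event(void *data, unsigned int event_data)
{
	(void)data;
	printf("event 0x%x\n", event_data);
}

int main(void)
{
	demo_register(print_event, NULL);
	demo_dispatch(0x1234);
	demo_unregister();
	return 0;
}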
+#include "sss_hwif_aeq.h" +#include "sss_hwif_ceq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define SSS_CTRLQ_WQE_HEAD_LEN 32 + +#define SSS_HI_8_BITS(data) (((data) >> 8) & 0xFF) +#define SSS_LO_8_BITS(data) ((data) & 0xFF) + +#define SSS_CTRLQ_DB_INFO_HI_PI_SHIFT 0 +#define SSS_CTRLQ_DB_INFO_HI_PI_MASK 0xFFU +#define SSS_CTRLQ_DB_INFO_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_INFO_##member##_MASK) << \ + SSS_CTRLQ_DB_INFO_##member##_SHIFT) + +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_SHIFT 24 +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_SHIFT 27 +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_MASK 0x7U +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_MASK 0x1FU +#define SSS_CTRLQ_DB_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_HEAD_##member##_MASK) << \ + SSS_CTRLQ_DB_HEAD_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_PI_SHIFT 0 +#define SSS_CTRLQ_CTRL_CMD_SHIFT 16 +#define SSS_CTRLQ_CTRL_MOD_SHIFT 24 +#define SSS_CTRLQ_CTRL_ACK_TYPE_SHIFT 29 +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_CTRL_PI_MASK 0xFFFFU +#define SSS_CTRLQ_CTRL_CMD_MASK 0xFFU +#define SSS_CTRLQ_CTRL_MOD_MASK 0x1FU +#define SSS_CTRLQ_CTRL_ACK_TYPE_MASK 0x3U +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_CTRL_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_CTRL_##member##_MASK) << \ + SSS_CTRLQ_CTRL_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_GET(val, member) \ + (((val) >> SSS_CTRLQ_CTRL_##member##_SHIFT) & \ + SSS_CTRLQ_CTRL_##member##_MASK) + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_SHIFT 0 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_SHIFT 15 +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_SHIFT 22 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_SHIFT 23 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_SHIFT 27 +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_SHIFT 29 +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_MASK 0xFFU +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_WQE_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_WQE_HEAD_##member##_MASK) << \ + SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) + +#define SSS_GET_CTRLQ_WQE_HEAD(val, member) \ + (((val) >> SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) & \ + SSS_CTRLQ_WQE_HEAD_##member##_MASK) + +#define SSS_STORE_DATA_ARM_SHIFT 31 + +#define SSS_STORE_DATA_ARM_MASK 0x1U + +#define SSS_STORE_DATA_SET(val, member) \ + (((val) & SSS_STORE_DATA_##member##_MASK) << \ + SSS_STORE_DATA_##member##_SHIFT) + +#define SSS_STORE_DATA_CLEAR(val, member) \ + ((val) & (~(SSS_STORE_DATA_##member##_MASK << \ + SSS_STORE_DATA_##member##_SHIFT))) + +#define SSS_WQE_ERRCODE_VAL_SHIFT 0 + +#define SSS_WQE_ERRCODE_VAL_MASK 0x7FFFFFFF + +#define SSS_GET_WQE_ERRCODE(val, member) \ + (((val) >> SSS_WQE_ERRCODE_##member##_SHIFT) & \ + SSS_WQE_ERRCODE_##member##_MASK) + +#define SSS_CEQE_CTRLQ_TYPE_SHIFT 0 + +#define SSS_CEQE_CTRLQ_TYPE_MASK 0x7 + +#define SSS_GET_CEQE_CTRLQ(val, member) \ + (((val) >> SSS_CEQE_CTRLQ_##member##_SHIFT) & \ + SSS_CEQE_CTRLQ_##member##_MASK) + +#define SSS_WQE_COMPLETE(ctrl_info) SSS_CTRLQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define SSS_WQE_HEAD(wqe) ((struct sss_ctrlq_head *)(wqe)) + +#define SSS_CTRLQ_DB_PI_OFF(pi) (((u16)SSS_LO_8_BITS(pi)) 
<< 3) + +#define SSS_CTRLQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base)) + SSS_CTRLQ_DB_PI_OFF(pi)) + +#define SSS_FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define SSS_WQE_LCMD_SIZE 64 +#define SSS_WQE_SCMD_SIZE 64 + +#define SSS_COMPLETE_LEN 3 + +#define SSS_CTRLQ_WQE_SIZE 64 + +#define SSS_CTRLQ_TO_INFO(ctrlq) \ + container_of((ctrlq) - (ctrlq)->ctrlq_type, struct sss_ctrlq_info, ctrlq[0]) + +#define SSS_CTRLQ_COMPLETE_CODE 11 + +enum SSS_ctrlq_scmd_type { + SSS_CTRLQ_SET_ARM_CMD = 2, +}; + +enum sss_ctrl_sect_len { + SSS_CTRL_SECT_LEN = 1, + SSS_CTRL_DIRECT_SECT_LEN = 2, +}; + +enum sss_bd_len { + SSS_BD_LCMD_LEN = 2, + SSS_BD_SCMD_LEN = 3, +}; + +enum sss_data_fmt { + SSS_DATA_SGE, + SSS_DATA_DIRECT, +}; + +enum sss_completion_fmt { + SSS_COMPLETE_DIRECT, + SSS_COMPLETE_SGE, +}; + +enum sss_completion_request { + SSS_CEQ_SET = 1, +}; + +enum sss_ctrlq_comm_msg_type { + SSS_SYNC_MSG_DIRECT_REPLY, + SSS_SYNC_MSG_SGE_REPLY, + SSS_ASYNC_MSG, +}; + +#define SSS_SCMD_DATA_LEN 16 + +enum sss_db_src_type { + SSS_DB_SRC_CTRLQ_TYPE, + SSS_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum sss_ctrlq_db_type { + SSS_DB_SQ_RQ_TYPE, + SSS_DB_CTRLQ_TYPE, +}; + +struct sss_ctrlq_db { + u32 head; + u32 info; +}; + +/* hardware define: ctrlq wqe */ +struct sss_ctrlq_head { + u32 info; + u32 store_data; +}; + +struct sss_scmd_bd { + u32 data_len; + u32 rsvd; + u8 data[SSS_SCMD_DATA_LEN]; +}; + +struct sss_lcmd_bd { + struct sss_sge sge; + u32 rsvd1; + u64 store_async_buf; + u64 rsvd3; +}; + +struct sss_wqe_state { + u32 info; +}; + +struct sss_wqe_ctrl { + u32 info; +}; + +struct sss_sge_reply { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_ctrlq_completion { + union { + struct sss_sge_reply sge_reply; + u64 direct_reply; + }; +}; + +struct sss_ctrlq_wqe_scmd { + struct sss_ctrlq_head head; + u64 rsvd; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_scmd_bd bd; +}; + +struct sss_ctrlq_wqe_lcmd { + struct sss_ctrlq_head head; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_lcmd_bd bd; +}; + +struct sss_ctrlq_inline_wqe { + struct sss_ctrlq_wqe_scmd wqe_scmd; +}; + +struct sss_ctrlq_wqe { + union { + struct sss_ctrlq_inline_wqe inline_wqe; + struct sss_ctrlq_wqe_lcmd wqe_lcmd; + }; +}; + +typedef int (*sss_ctrlq_type_handler_t)(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci); + +void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci) +{ + if (sss_wq_is_empty(wq)) + return NULL; + + return sss_wq_read_one_wqebb(wq, ci); +} + +static void *sss_ctrlq_get_wqe(struct sss_wq *wq, u16 *pi) +{ + if (!sss_wq_free_wqebb(wq)) + return NULL; + + return sss_wq_get_one_wqebb(wq, pi); +} + +static void sss_ctrlq_set_completion(struct sss_ctrlq_completion *complete, + struct sss_ctrl_msg_buf *out_buf) +{ + struct sss_sge_reply *sge_reply = &complete->sge_reply; + + sss_set_sge(&sge_reply->sge, out_buf->dma_addr, SSS_CTRLQ_BUF_LEN); +} + +static void sss_ctrlq_set_lcmd_bufdesc(struct sss_ctrlq_wqe_lcmd *wqe, + struct sss_ctrl_msg_buf *in_buf) +{ + sss_set_sge(&wqe->bd.sge, in_buf->dma_addr, in_buf->size); +} + +static void sss_ctrlq_fill_db(struct sss_ctrlq_db *db, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + db->info = SSS_CTRLQ_DB_INFO_SET(SSS_HI_8_BITS(pi), HI_PI); + + db->head = SSS_CTRLQ_DB_HEAD_SET(SSS_DB_CTRLQ_TYPE, QUEUE_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(ctrlq_type, CTRLQ_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(SSS_DB_SRC_CTRLQ_TYPE, SRC_TYPE); +} + +static void sss_ctrlq_set_db(struct sss_ctrlq 
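SSS_CTRLQ_DB_PI_OFF/SSS_CTRLQ_DB_ADDR select an 8-byte doorbell slot from the low 8 bits of the producer index, while the doorbell record itself carries the high 8 bits of the PI plus queue-type fields. A small sketch of that math; the head field value below is a placeholder, not the hardware encoding.

#include <stdint.h>
#include <stdio.h>

/* Doorbell slot selection and record packing: 8-byte slots indexed by the
 * low 8 bits of the producer index, the record carrying the high 8 bits of
 * the PI. The head value is a placeholder, not the hardware encoding.
 */
struct demo_db {
	uint32_t head;
	uint32_t info;
};

int main(void)
{
	uint16_t pi = 0x1234;
	uint32_t slot_off = ((uint32_t)(pi & 0xFFU)) << 3;  /* 8-byte slots */
	struct demo_db db = {
		.head = 1U << 24,          /* placeholder queue-type bits  */
		.info = (pi >> 8) & 0xFFU, /* high 8 bits of the PI        */
	};

	printf("doorbell offset 0x%x, head 0x%08x, info 0x%08x\n",
	       slot_off, db.head, db.info);
	return 0;
}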
*ctrlq, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + struct sss_ctrlq_db db = {0}; + u8 *db_base = SSS_TO_HWDEV(ctrlq)->ctrlq_info->db_base; + + sss_ctrlq_fill_db(&db, ctrlq_type, pi); + + /* The data that is written to HW should be in Big Endian Format */ + db.info = sss_hw_be32(db.info); + db.head = sss_hw_be32(db.head); + + wmb(); /* make sure write db info to reg */ + writeq(*((u64 *)&db), SSS_CTRLQ_DB_ADDR(db_base, pi)); +} + +static void sss_ctrlq_fill_wqe(void *dst, const void *src) +{ + memcpy((u8 *)dst + SSS_FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + SSS_FIRST_DATA_TO_WRITE_LAST, + SSS_CTRLQ_WQE_SIZE - SSS_FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void sss_ctrlq_prepare_wqe_ctrl(struct sss_ctrlq_wqe *wqe, + int wrapped, u8 mod, u8 cmd, u16 pi, + enum sss_completion_fmt complete_fmt, + enum sss_data_fmt data_fmt, + enum sss_bd_len buf_len) +{ + struct sss_wqe_ctrl *ctrl = NULL; + enum sss_ctrl_sect_len ctrl_len; + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = NULL; + struct sss_ctrlq_wqe_scmd *wqe_scmd = NULL; + u32 saved_data = SSS_WQE_HEAD(wqe)->store_data; + + if (data_fmt == SSS_DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->state.info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = SSS_CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->state.info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = SSS_CTRL_DIRECT_SECT_LEN; + } + + ctrl->info = SSS_CTRLQ_CTRL_SET(pi, PI) | + SSS_CTRLQ_CTRL_SET(cmd, CMD) | + SSS_CTRLQ_CTRL_SET(mod, MOD) | + SSS_CTRLQ_CTRL_SET(SSS_ACK_TYPE_CTRLQ, ACK_TYPE); + + SSS_WQE_HEAD(wqe)->info = + SSS_CTRLQ_WQE_HEAD_SET(buf_len, BD_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(complete_fmt, COMPLETE_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(data_fmt, DATA_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_CEQ_SET, COMPLETE_REQ) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_COMPLETE_LEN, COMPLETE_SECT_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(ctrl_len, CTRL_LEN) | + SSS_CTRLQ_WQE_HEAD_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == SSS_CTRLQ_SET_ARM_CMD && mod == SSS_MOD_TYPE_COMM) { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data | + SSS_STORE_DATA_SET(1, ARM); + } else { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data; + } +} + +static void sss_ctrlq_set_lcmd_wqe(struct sss_ctrlq_wqe *wqe, + enum sss_ctrlq_comm_msg_type cmd_type, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, int wrapped, + u8 mod, u8 cmd, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum sss_completion_fmt complete_fmt = SSS_COMPLETE_DIRECT; + + switch (cmd_type) { + case SSS_SYNC_MSG_DIRECT_REPLY: + wqe_lcmd->completion.direct_reply = 0; + break; + case SSS_SYNC_MSG_SGE_REPLY: + if (out_buf) { + complete_fmt = SSS_COMPLETE_SGE; + sss_ctrlq_set_completion(&wqe_lcmd->completion, out_buf); + } + break; + case SSS_ASYNC_MSG: + wqe_lcmd->completion.direct_reply = 0; + wqe_lcmd->bd.store_async_buf = (u64)(in_buf); + break; + } + + sss_ctrlq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, pi, complete_fmt, + SSS_DATA_SGE, SSS_BD_LCMD_LEN); + + sss_ctrlq_set_lcmd_bufdesc(wqe_lcmd, in_buf); +} + +static void sss_ctrlq_update_cmd_state(struct sss_ctrlq *ctrlq, u16 pi, + struct sss_ctrlq_wqe *wqe) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[pi]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 state = sss_hw_cpu32(lcmd->state.info); + + if (info->direct_resp) + *info->direct_resp = + 
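sss_ctrlq_fill_wqe() deliberately copies bytes 8..63 of the 64-byte WQE first and stores the leading 8 bytes, which hold the hardware owner/busy bit, last behind a write barrier, so the device never observes a partially written entry. A hedged userspace sketch of that copy order (the barrier is only indicated by a comment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQE_SIZE 64

/* Copy bytes 8..63 of the 64-byte entry first; the leading 8 bytes that hold
 * the owner/busy bit are stored last (behind a write barrier on real
 * hardware), so the device never sees a half-written WQE.
 */
static void fill_wqe(void *dst, const void *src)
{
	memcpy((uint8_t *)dst + sizeof(uint64_t),
	       (const uint8_t *)src + sizeof(uint64_t),
	       WQE_SIZE - sizeof(uint64_t));

	/* wmb() would be required here when dst is device-visible memory */

	*(uint64_t *)dst = *(const uint64_t *)src;
}

int main(void)
{
	uint64_t src[WQE_SIZE / 8] = { 0x1ULL, 0x2ULL };
	uint64_t dst[WQE_SIZE / 8] = { 0 };

	fill_wqe(dst, src);
	printf("header %llx body[0] %llx\n",
	       (unsigned long long)dst[0], (unsigned long long)dst[1]);
	return 0;
}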
sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) + *info->err_code = SSS_GET_WQE_ERRCODE(state, VAL); +} + +static int sss_ctrlq_check_sync_timeout(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd; + struct sss_wqe_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = sss_hw_cpu32((ctrl)->info); + if (!SSS_WQE_COMPLETE(ctrl_info)) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe do not complete\n"); + return -EFAULT; + } + + sss_ctrlq_update_cmd_state(ctrlq, pi, wqe); + + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Success to check ctrlq sync cmd\n"); + return 0; +} + +static void sss_reset_cmd_info(struct sss_ctrlq_cmd_info *cmd_info, + const struct sss_ctrlq_cmd_info *store_cmd_info) +{ + if (cmd_info->err_code == store_cmd_info->err_code) + cmd_info->err_code = NULL; + + if (cmd_info->done == store_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == store_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int sss_ctrlq_ceq_handler_state(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo; + int ret; + ulong end = jiffies + msecs_to_jiffies(timeout); + + if (SSS_TO_HWDEV(ctrlq)->poll) { + while (time_before(jiffies, end)) { + sss_ctrlq_ceq_handler(SSS_TO_HWDEV(ctrlq), 0); + if (store_cmd_info->done->done != 0) + return 0; + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies(timeout); + if (wait_for_completion_timeout(store_cmd_info->done, timeo)) + return 0; + } + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if (cmd_info->cmpt_code == store_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*store_cmd_info->cmpt_code == SSS_CTRLQ_COMPLETE_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq direct sync command complete\n"); + spin_unlock_bh(&ctrlq->ctrlq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->msg_id) { + ret = sss_ctrlq_check_sync_timeout(ctrlq, curr_wqe, curr_pi); + if (ret != 0) + cmd_info->msg_type = SSS_MSG_TYPE_TIMEOUT; + else + cmd_info->msg_type = SSS_MSG_TYPE_PSEUDO_TIMEOUT; + } else { + ret = -ETIMEDOUT; + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync command curr_msg_id dismatch with cmd_info msg_id\n"); + } + + sss_reset_cmd_info(cmd_info, store_cmd_info); + + spin_unlock_bh(&ctrlq->ctrlq_lock); + + if (ret == 0) + return 0; + + sss_dump_ceq_info(SSS_TO_HWDEV(ctrlq)); + + return -ETIMEDOUT; +} + +static int sss_wait_ctrlq_sync_cmd_completion(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, u32 timeout) +{ + return sss_ctrlq_ceq_handler_state(ctrlq, cmd_info, store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, timeout); +} + +static int sss_ctrlq_msg_lock(struct sss_ctrlq *ctrlq, u16 channel) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_CTRLQ_TO_INFO(ctrlq); + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if (ctrlq_info->lock_channel_en && test_bit(channel, &ctrlq_info->channel_stop)) { + spin_unlock_bh(&ctrlq->ctrlq_lock); + return -EAGAIN; + } + + return 0; +} + +static void sss_ctrlq_msg_unlock(struct sss_ctrlq *ctrlq) +{ + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_ctrlq_set_cmd_buf(struct sss_ctrlq_cmd_info *cmd_info, + struct sss_hwdev *hwdev, + struct 
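sss_ctrlq_ceq_handler_state() waits for a synchronous command in one of two ways: in polling mode it keeps driving the completion-queue handler until the done flag flips or the deadline passes, otherwise it blocks on wait_for_completion_timeout(). The sketch below models only the polling branch with a monotonic-clock deadline; the drive_ceq callback name is a stand-in.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Polling-mode wait: keep driving the completion handler until *done flips
 * or the deadline passes.
 */
static bool poll_until_done(volatile int *done, int timeout_ms,
			    void (*drive_ceq)(void))
{
	struct timespec now, end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec += timeout_ms / 1000;
	end.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (end.tv_nsec >= 1000000000L) {
		end.tv_sec += 1;
		end.tv_nsec -= 1000000000L;
	}

	for (;;) {
		drive_ceq();
		if (*done)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > end.tv_sec ||
		    (now.tv_sec == end.tv_sec && now.tv_nsec >= end.tv_nsec))
			return false;  /* timed out */
	}
}

static volatile int demo_done;

static void demo_drive(void)
{
	demo_done = 1;  /* pretend the command completed on the first poll */
}

int main(void)
{
	printf("completed: %d\n", poll_until_done(&demo_done, 100, demo_drive));
	return 0;
}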
sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf) +{ + cmd_info->in_buf = in_buf; + cmd_info->out_buf = out_buf; + + if (in_buf) + atomic_inc(&in_buf->ref_cnt); + + if (out_buf) + atomic_inc(&out_buf->ref_cnt); +} + +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL; + struct sss_ctrlq_wqe wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL; + struct sss_ctrlq_cmd_info store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped; + int errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, NULL); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_DIRECT_REPLY, in_buf, NULL, + wrapped, mod, cmd, curr_pi); + + /* CTRLQ WQE is not shadow, therefore wqe will be written to wq */ + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, SSS_CTRLQ_SYNC, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd direct resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? ret : errcode; +} + +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL, wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL, store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped, errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 
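When a command is posted, the producer index advances by the number of WQE building blocks consumed, and the queue's wrap flag toggles whenever the index runs past the queue depth, as in the next_pi computation above. A compact model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

/* Producer-index bookkeeping for posting one control-queue WQE: advance by
 * the number of WQE building blocks consumed and toggle the wrap flag when
 * the index runs past the queue depth.
 */
struct demo_q {
	uint16_t pi;
	uint16_t depth;   /* a power of two in the driver, e.g. 4096 */
	int wrapped;
};

static uint16_t post_one(struct demo_q *q, uint16_t wqebb_num)
{
	uint16_t next_pi = q->pi + wqebb_num;

	if (next_pi >= q->depth) {
		q->wrapped = !q->wrapped;
		next_pi -= q->depth;
	}
	q->pi = next_pi;
	return next_pi;
}

int main(void)
{
	struct demo_q q = { .pi = 4095, .depth = 4096, .wrapped = 0 };

	printf("next pi %u, wrapped %d\n", post_one(&q, 1), q.wrapped);
	return 0;
}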
1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_SGE_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, out_buf); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_SGE_REPLY, in_buf, out_buf, + wrapped, mod, cmd, curr_pi); + + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, ctrlq->ctrlq_type, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd detail resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? ret : errcode; +} + +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *info) +{ + if (info->in_buf) + sss_free_ctrlq_msg_buf(hwdev, info->in_buf); + + if (info->out_buf) + sss_free_ctrlq_msg_buf(hwdev, info->out_buf); + + info->out_buf = NULL; + info->in_buf = NULL; +} + +static void sss_erase_wqe_complete_bit(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *wqe_ctrl = NULL; + u32 head = sss_hw_cpu32(SSS_WQE_HEAD(wqe)->info); + enum sss_data_fmt format = SSS_GET_CTRLQ_WQE_HEAD(head, DATA_FMT); + + wqe_ctrl = (format == SSS_DATA_SGE) ? 
&wqe->wqe_lcmd.ctrl : + &wqe->inline_wqe.wqe_scmd.ctrl; + + wqe_ctrl->info = 0; + ctrlq->cmd_info[ci].msg_type = SSS_MSG_TYPE_NONE; + + /* write ctrlq wqe msg type */ + wmb(); + + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); +} + +static void sss_ctrlq_update_cmd_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[ci]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 status; + + spin_lock(&ctrlq->ctrlq_lock); + + if (info->direct_resp) + *info->direct_resp = + sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) { + status = sss_hw_cpu32(lcmd->state.info); + *info->err_code = SSS_GET_WQE_ERRCODE(status, VAL); + } + + if (info->cmpt_code) { + *info->cmpt_code = SSS_CTRLQ_COMPLETE_CODE; + info->cmpt_code = NULL; + } + + /* read all before set info done */ + smp_rmb(); + + if (info->done) { + complete(info->done); + info->done = NULL; + } + + spin_unlock(&ctrlq->ctrlq_lock); +} + +static int sss_ctrlq_arm_ceq_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_default_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + dma_rmb(); + + sss_ctrlq_update_cmd_info(ctrlq, wqe, ci); + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_async_cmd_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + dma_rmb(); + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_pseudo_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + u32 i; + u32 *data = (u32 *)wqe; + u32 num = SSS_CTRLQ_WQE_HEAD_LEN / sizeof(u32); + + sdk_warn(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq timeout, ci: %u\n", ci); + + for (i = 0; i < num; i += 0x4) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 0x1), *(data + i + 0x2), + *(data + i + 0x3)); + } + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_force_stop_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + return sss_ctrlq_async_cmd_handler(ctrlq, wqe, ci); +} + +void sss_ctrlq_ceq_handler(void *dev, u32 data) +{ + u16 ci; + int ret; + enum sss_ctrlq_type type = SSS_GET_CEQE_CTRLQ(data, TYPE); + struct sss_ctrlq *ctrlq = &SSS_TO_CTRLQ_INFO(dev)->ctrlq[type]; + struct sss_ctrlq_wqe *ctrlq_wqe = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + sss_ctrlq_type_handler_t handler[] = { + NULL, + sss_ctrlq_arm_ceq_handler, + 
sss_ctrlq_default_handler, + sss_ctrlq_default_handler, + sss_ctrlq_async_cmd_handler, + sss_ctrlq_pseudo_timeout_handler, + sss_ctrlq_timeout_handler, + sss_ctrlq_force_stop_handler, + }; + + while ((ctrlq_wqe = sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) != NULL) { + info = &ctrlq->cmd_info[ci]; + + if (info->msg_type < SSS_MSG_TYPE_NONE || + info->msg_type >= SSS_MSG_TYPE_MAX) { + ret = sss_ctrlq_default_handler(ctrlq, ctrlq_wqe, ci); + if (ret) + break; + + continue; + } + + if (!handler[info->msg_type]) + break; + + ret = handler[info->msg_type](ctrlq, ctrlq_wqe, ci); + if (ret) + break; + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h new file mode 100644 index 00000000000000..219ef90baf44e0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_H +#define SSS_HWIF_CTRLQ_H + +#include "sss_hw_wq.h" + +#define SSS_CTRLQ_BUF_LEN 2048U + +#define SSS_CTRLQ_SEND_CMPT_CODE 10 + +#define SSS_CTRLQ_FORCE_STOP_CMPT_CODE 12 + +#define SSS_WQEBB_NUM_FOR_CTRLQ 1 + +enum sss_ctrlq_state { + SSS_CTRLQ_ENABLE = BIT(0), +}; + +void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci); +void sss_ctrlq_ceq_handler(void *handle, u32 ceqe_data); +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *cmd_info); +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c new file mode 100644 index 00000000000000..8f579544c0cdc3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_pci.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_ENABLE_TIMEOUT 300 + +static int sss_wait_ctrlq_enable(struct sss_ctrlq_info *ctrlq_info) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(SSS_CTRLQ_ENABLE_TIMEOUT); + do { + if (ctrlq_info->state & SSS_CTRLQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && + SSS_TO_HWDEV(ctrlq_info)->chip_present_flag && + !ctrlq_info->disable_flag); + + ctrlq_info->disable_flag = 1; + + return -EBUSY; +} + +static int sss_check_ctrlq_param(const void *hwdev, const struct sss_ctrl_msg_buf *in_buf) +{ + if (!hwdev || !in_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or in_buf: %p\n", hwdev, in_buf); + return -EINVAL; + } + + if (in_buf->size == 0 || in_buf->size > SSS_CTRLQ_BUF_LEN) { + pr_err("Invalid ctrlq buf size: 0x%x\n", in_buf->size); + return -EINVAL; + } + + return 0; +} + +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev) +{ + struct sss_ctrlq_info *ctrlq_info = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Alloc ctrlq msg buf: hwdev 
is NULL\n"); + return NULL; + } + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + + msg_buf = kzalloc(sizeof(*msg_buf), GFP_ATOMIC); + if (!msg_buf) + return NULL; + + msg_buf->buf = pci_pool_alloc(ctrlq_info->msg_buf_pool, GFP_ATOMIC, + &msg_buf->dma_addr); + if (!msg_buf->buf) { + sdk_err(dev, "Fail to allocate ctrlq pci pool\n"); + goto alloc_pci_buf_err; + } + + msg_buf->size = SSS_CTRLQ_BUF_LEN; + atomic_set(&msg_buf->ref_cnt, 1); + + return msg_buf; + +alloc_pci_buf_err: + kfree(msg_buf); + return NULL; +} +EXPORT_SYMBOL(sss_alloc_ctrlq_msg_buf); + +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_TO_CTRLQ_INFO(hwdev); + + if (!hwdev || !msg_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or msg_buf: %p\n", hwdev, msg_buf); + return; + } + + if (atomic_dec_and_test(&msg_buf->ref_cnt) == 0) + return; + + pci_pool_free(ctrlq_info->msg_buf_pool, msg_buf->buf, msg_buf->dma_addr); + kfree(msg_buf); +} +EXPORT_SYMBOL(sss_free_ctrlq_msg_buf); + +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, u64 *out_param, + u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) { + pr_err("Invalid ctrlq parameters\n"); + return ret; + } + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_direct_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_param, timeout, channel); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_direct_reply); + +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) + return ret; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_detail_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_buf, + out_param, timeout, channel); + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_detail_reply); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c new file mode 100644 index 00000000000000..a6b049363c3fad --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_pci.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_export.h" +#include "sss_hwif_ceq.h" +#include 
"sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_common.h" +#include "sss_hwif_ctrlq_init.h" + +#define SSS_CTRLQ_DEPTH 4096 + +#define SSS_CTRLQ_PFN_SHIFT 12 +#define SSS_CTRLQ_PFN(addr) ((addr) >> SSS_CTRLQ_PFN_SHIFT) + +#define SSS_CTRLQ_CEQ_ID 0 + +#define SSS_CTRLQ_WQ_CLA_SIZE 512 + +#define SSS_CTRLQ_WQEBB_SIZE 64 + +#define SSS_CTRLQ_IDLE_TIMEOUT 5000 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CEQ_ID_SHIFT 53 +#define SSS_CTRLQ_CTX_CEQ_ARM_SHIFT 61 +#define SSS_CTRLQ_CTX_CEQ_EN_SHIFT 62 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_SHIFT 63 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CEQ_ID_MASK 0xFF +#define SSS_CTRLQ_CTX_CEQ_ARM_MASK 0x1 +#define SSS_CTRLQ_CTX_CEQ_EN_MASK 0x1 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_MASK 0x1 + +#define SSS_SET_CTRLQ_CTX_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CI_SHIFT 52 + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CI_MASK 0xFFF + +#define SSS_SET_CTRLQ_CTX_BLOCK_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CLA_WQ_PAGE_NUM (SSS_CTRLQ_WQ_CLA_SIZE / sizeof(u64)) + +#define SSS_GET_WQ_PAGE_SIZE(page_order) (SSS_HW_WQ_PAGE_SIZE * (1U << (page_order))) + +#define SSS_CTRLQ_DMA_POOL_NAME "sss_ctrlq" + +#define SSS_CTRLQ_WRAP_ENABLE 1 + +#define SSS_SET_WQE_PAGE_PFN(pfn) \ + (SSS_SET_CTRLQ_CTX_INFO(1, CEQ_ARM) | \ + SSS_SET_CTRLQ_CTX_INFO(1, CEQ_EN) | \ + SSS_SET_CTRLQ_CTX_INFO((pfn), NOW_WQE_PAGE_PFN) | \ + SSS_SET_CTRLQ_CTX_INFO(SSS_CTRLQ_CEQ_ID, CEQ_ID) | \ + SSS_SET_CTRLQ_CTX_INFO(1, HW_BUSY_BIT)) + +#define SSS_SET_WQ_BLOCK_PFN(wq, pfn) \ + (SSS_SET_CTRLQ_CTX_BLOCK_INFO((pfn), WQ_BLOCK_PFN) | \ + SSS_SET_CTRLQ_CTX_BLOCK_INFO((u16)(wq)->ci, CI)) + +static u32 wq_page_num = SSS_MAX_WQ_PAGE_NUM; +module_param(wq_page_num, uint, 0444); +MODULE_PARM_DESC(wq_page_num, + "Set wq page num, wq page size is 4K * (2 ^ wq_page_num) - default is 8"); + +static int sss_init_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + + if (SSS_WQ_IS_0_LEVEL_CLA(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq)) + return 0; + + /* ctrlq wq's CLA table is up to 512B */ + if (ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq.page_num > SSS_CTRLQ_CLA_WQ_PAGE_NUM) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq wq page out of range: %lu\n", + SSS_CTRLQ_CLA_WQ_PAGE_NUM); + return -EINVAL; + } + + ctrlq_info->wq_block_vaddr = + dma_zalloc_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + &ctrlq_info->wq_block_paddr, GFP_KERNEL); + if (!ctrlq_info->wq_block_vaddr) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to alloc ctrlq wq block\n"); + return -ENOMEM; + } + + for (i = 0; i < ctrlq_info->num; i++) + memcpy((u8 *)ctrlq_info->wq_block_vaddr + SSS_CTRLQ_WQ_CLA_SIZE * i, + ctrlq_info->ctrlq[i].wq.block_vaddr, + ctrlq_info->ctrlq[i].wq.page_num * sizeof(u64)); + + return 0; +} + +static void sss_deinit_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + if (ctrlq_info->wq_block_vaddr) { + dma_free_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + ctrlq_info->wq_block_vaddr, ctrlq_info->wq_block_paddr); + ctrlq_info->wq_block_vaddr = NULL; + } +} + +static int sss_create_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + int ret; + u8 q_type; + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ret = sss_create_wq(SSS_TO_HWDEV(ctrlq_info), 
&ctrlq_info->ctrlq[q_type].wq, + SSS_CTRLQ_DEPTH, SSS_CTRLQ_WQEBB_SIZE); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to create ctrlq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all ctrlq's wq page addr in one wq block */ + ret = sss_init_ctrq_block(ctrlq_info); + if (ret != 0) + goto destroy_wq; + + return 0; + +destroy_wq: + for (i = 0; i < q_type; i++) + sss_destroy_wq(&ctrlq_info->ctrlq[i].wq); + sss_deinit_ctrq_block(ctrlq_info); + + return ret; +} + +static void sss_destroy_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 type; + + sss_deinit_ctrq_block(ctrlq_info); + + for (type = 0; type < ctrlq_info->num; type++) + sss_destroy_wq(&ctrlq_info->ctrlq[type].wq); +} + +static int sss_init_ctrlq_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_ctxt_info *ctx, + dma_addr_t wq_block_paddr) +{ + struct sss_wq *wq = &ctrlq->wq; + u64 pfn = SSS_CTRLQ_PFN(wq->page[0].align_paddr); + + ctrlq->cmd_info = kcalloc(ctrlq->wq.q_depth, sizeof(*ctrlq->cmd_info), + GFP_KERNEL); + if (!ctrlq->cmd_info) + return -ENOMEM; + + ctrlq->wrapped = SSS_CTRLQ_WRAP_ENABLE; + spin_lock_init(&ctrlq->ctrlq_lock); + + ctx->curr_wqe_page_pfn = SSS_SET_WQE_PAGE_PFN(pfn); + pfn = SSS_WQ_IS_0_LEVEL_CLA(wq) ? pfn : SSS_CTRLQ_PFN(wq_block_paddr); + ctx->wq_block_pfn = SSS_SET_WQ_BLOCK_PFN(wq, pfn); + + return 0; +} + +static void sss_deinit_ctrlq_info(struct sss_ctrlq *ctrlq) +{ + kfree(ctrlq->cmd_info); +} + +static void sss_flush_ctrlq_sync_cmd(struct sss_ctrlq_cmd_info *info) +{ + if (info->msg_type != SSS_MSG_TYPE_DIRECT_RESP && + info->msg_type != SSS_MSG_TYPE_SGE_RESP) + return; + + info->msg_type = SSS_MSG_TYPE_FORCE_STOP; + + if (info->cmpt_code && *info->cmpt_code == SSS_CTRLQ_SEND_CMPT_CODE) + *info->cmpt_code = SSS_CTRLQ_FORCE_STOP_CMPT_CODE; + + if (info->done) { + complete(info->done); + info->cmpt_code = NULL; + info->direct_resp = NULL; + info->err_code = NULL; + info->done = NULL; + } +} + +static void sss_flush_ctrlq_cmd(struct sss_ctrlq *ctrlq) +{ + u16 ci = 0; + + spin_lock_bh(&ctrlq->ctrlq_lock); + while (sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) { + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); + sss_flush_ctrlq_sync_cmd(&ctrlq->cmd_info[ci]); + } + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_free_all_ctrlq_cmd_buff(struct sss_ctrlq *ctrlq) +{ + u16 i; + + for (i = 0; i < ctrlq->wq.q_depth; i++) + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[i]); +} + +static int sss_chip_set_ctrlq_ctx(struct sss_hwdev *hwdev, u8 qid, + struct sss_ctrlq_ctxt_info *ctxt) +{ + int ret; + struct sss_cmd_ctrlq_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + memcpy(&cmd_ctx.ctxt, ctxt, sizeof(*ctxt)); + cmd_ctx.ctrlq_id = qid; + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT, + &cmd_ctx, sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ctrlq ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_init_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 q_type; + int ret; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ret = sss_chip_set_ctrlq_ctx(hwdev, q_type, &ctrlq_info->ctrlq[q_type].ctrlq_ctxt); + if (ret != 0) + return ret; + } + + ctrlq_info->disable_flag = 0; + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return 0; +} + +int 
sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[ctrlq_type]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[ctrlq_type]); + ctrlq_info->ctrlq[ctrlq_type].wrapped = 1; + sss_wq_reset(&ctrlq_info->ctrlq[ctrlq_type].wq); + } + + return sss_init_ctrlq_ctx(hwdev); +} + +static int sss_init_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + u8 q_type; + int ret = -ENOMEM; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ctrlq_info = kzalloc(sizeof(*ctrlq_info), GFP_KERNEL); + if (!ctrlq_info) + return -ENOMEM; + + ctrlq_info->hwdev = hwdev; + hwdev->ctrlq_info = ctrlq_info; + + if (SSS_SUPPORT_CTRLQ_NUM(hwdev)) { + ctrlq_info->num = hwdev->glb_attr.ctrlq_num; + if (hwdev->glb_attr.ctrlq_num > SSS_MAX_CTRLQ_TYPE) { + sdk_warn(hwdev->dev_hdl, "Adjust ctrlq num to %d\n", SSS_MAX_CTRLQ_TYPE); + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + } else { + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + + ctrlq_info->msg_buf_pool = dma_pool_create(SSS_CTRLQ_DMA_POOL_NAME, hwdev->dev_hdl, + SSS_CTRLQ_BUF_LEN, SSS_CTRLQ_BUF_LEN, 0ULL); + if (!ctrlq_info->msg_buf_pool) { + sdk_err(hwdev->dev_hdl, "Fail to create ctrlq buffer pool\n"); + goto create_pool_err; + } + + ret = sss_create_ctrlq_wq(ctrlq_info); + if (ret != 0) + goto create_wq_err; + + ret = sss_alloc_db_addr(hwdev, (void __iomem *)&ctrlq_info->db_base); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc doorbell addr\n"); + goto init_db_err; + } + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ctrlq_info->ctrlq[q_type].hwdev = hwdev; + ctrlq_info->ctrlq[q_type].ctrlq_type = q_type; + ret = sss_init_ctrlq_info(&ctrlq_info->ctrlq[q_type], + &ctrlq_info->ctrlq[q_type].ctrlq_ctxt, + ctrlq_info->wq_block_paddr); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq i :%d\n", q_type); + goto init_ctrlq_info_err; + } + } + + ret = sss_init_ctrlq_ctx(hwdev); + if (ret != 0) + goto init_ctrlq_info_err; + + return 0; + +init_ctrlq_info_err: + for (i = 0; i < q_type; i++) + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + + sss_free_db_addr(hwdev, ctrlq_info->db_base); +init_db_err: + sss_destroy_ctrlq_wq(ctrlq_info); +create_wq_err: + dma_pool_destroy(ctrlq_info->msg_buf_pool); +create_pool_err: + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; + + return ret; +} + +static void sss_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + for (i = 0; i < ctrlq_info->num; i++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[i]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[i]); + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + } + + sss_free_db_addr(hwdev, ctrlq_info->db_base); + sss_destroy_ctrlq_wq(ctrlq_info); + + dma_pool_destroy(ctrlq_info->msg_buf_pool); + + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; +} + +static int sss_set_ctrlq_depth(void *hwdev) +{ + int ret; + struct sss_cmd_root_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + cmd_ctx.set_ctrlq_depth = 1; + cmd_ctx.ctrlq_depth = (u8)ilog2(SSS_CTRLQ_DEPTH); + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, &cmd_ctx, + sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set ctrlq depth, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, 
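sss_set_ctrlq_depth() reports the queue depth to management firmware as a log2 value (ilog2(SSS_CTRLQ_DEPTH)), so a depth of 4096 is transmitted as 12. A trivial standalone equivalent of that conversion:

#include <stdint.h>
#include <stdio.h>

/* Integer log2 for a power-of-two depth, standing in for ilog2(). */
static uint8_t depth_to_log2(uint32_t depth)
{
	uint8_t shift = 0;

	while ((1U << (shift + 1)) <= depth)
		shift++;
	return shift;
}

int main(void)
{
	printf("depth 4096 -> log2 %u\n", depth_to_log2(4096));
	return 0;
}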
cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_hwif_init_ctrlq(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq\n"); + return ret; + } + + sss_ceq_register_cb(hwdev, hwdev, SSS_NIC_CTRLQ, sss_ctrlq_ceq_handler); + + ret = sss_set_ctrlq_depth(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set ctrlq depth\n"); + goto set_depth_err; + } + + set_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + + return 0; + +set_depth_err: + sss_deinit_ctrlq(hwdev); + + return ret; +} + +static void sss_hwif_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_ceq_unregister_cb(hwdev, SSS_NIC_CTRLQ); + sss_deinit_ctrlq(hwdev); +} + +static bool sss_ctrlq_is_idle(struct sss_ctrlq *ctrlq) +{ + return sss_wq_is_empty(&ctrlq->wq); +} + +static enum sss_process_ret sss_check_ctrlq_stop_handler(void *priv_data) +{ + struct sss_hwdev *hwdev = priv_data; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + enum sss_ctrlq_type ctrlq_type; + + /* Stop waiting when card unpresent */ + if (!hwdev->chip_present_flag) + return SSS_PROCESS_OK; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + return SSS_PROCESS_DOING; + } + + return SSS_PROCESS_OK; +} + +static int sss_init_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + int ret; + + if (wq_page_num > SSS_MAX_WQ_PAGE_NUM) { + sdk_info(hwdev->dev_hdl, + "Invalid wq_page_num %u out of range, adjust to %d\n", + wq_page_num, SSS_MAX_WQ_PAGE_NUM); + wq_page_num = SSS_MAX_WQ_PAGE_NUM; + } + + hwdev->wq_page_size = SSS_GET_WQ_PAGE_SIZE(wq_page_num); + ret = sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + hwdev->wq_page_size); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set wq page size\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + SSS_HW_WQ_PAGE_SIZE); +} + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_ceq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev ceq.\n"); + return ret; + } + + ret = sss_init_ceq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq msix attr\n"); + goto init_msix_err; + } + + ret = sss_init_ctrlq_page_size(hwdev); + if (ret != 0) + goto init_size_err; + + ret = sss_hwif_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif ctrlq\n"); + goto init_ctrlq_err; + } + + return 0; + +init_ctrlq_err: + sss_deinit_ctrlq_page_size(hwdev); +init_size_err: +init_msix_err: + sss_hwif_deinit_ceq(hwdev); + + return ret; +} + +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev) +{ + sss_hwif_deinit_ctrlq(hwdev); + + sss_deinit_ctrlq_page_size(hwdev); + + sss_hwif_deinit_ceq(hwdev); +} + +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev) +{ + u16 cnt; + u16 ci; + u16 i; + u16 id; + struct sss_wq *wq = NULL; + struct sss_ctrlq *ctrlq = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + ctrlq = &hwdev->ctrlq_info->ctrlq[SSS_CTRLQ_SYNC]; + + spin_lock_bh(&ctrlq->ctrlq_lock); + wq = &ctrlq->wq; + id = wq->pi + wq->q_depth - wq->ci; + cnt = (u16)SSS_WQ_MASK_ID(wq, id); + ci = wq->ci; + + for (i 
= 0; i < cnt; i++) { + info = &ctrlq->cmd_info[SSS_WQ_MASK_ID(wq, ci + i)]; + sss_flush_ctrlq_sync_cmd(info); + } + + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev) +{ + enum sss_ctrlq_type ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + int ret; + + if (!(ctrlq_info->state & SSS_CTRLQ_ENABLE)) + return 0; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + ret = sss_check_handler_timeout(hwdev, sss_check_ctrlq_stop_handler, + SSS_CTRLQ_IDLE_TIMEOUT, USEC_PER_MSEC); + if (ret == 0) + return 0; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + sdk_err(hwdev->dev_hdl, "Ctrlq %d is busy\n", ctrlq_type); + } + + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h new file mode 100644 index 00000000000000..8aa0788c25bece --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_INIT_H +#define SSS_HWIF_CTRLQ_INIT_H + +#include "sss_hwdev.h" + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev); +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev); +int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev); +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev); +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c new file mode 100644 index 00000000000000..d735f1bf68d78f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_csr.h" +#include "sss_hwif_eq.h" + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_SHIFT 30 +#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_SHIFT 24 + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_MASK 0x3U +#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_MASK 0xFFU + +#define SSS_SET_EQ_CI_SIMPLE_INDIR(val, member) \ + (((val) & SSS_EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + SSS_EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define SSS_EQ_WRAPPED_SHIFT 20 + +#define SSS_EQ_CI(eq) ((eq)->ci | \ + ((u32)(eq)->wrap << SSS_EQ_WRAPPED_SHIFT)) + +#define SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define SSS_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? \ + SSS_AEQ_PHY_HI_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_HI_ADDR_REG(pg_num))) + +#define SSS_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? 
\ + SSS_AEQ_PHY_LO_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_LO_ADDR_REG(pg_num))) + +#define SSS_GET_EQ_PAGES_NUM(eq, size) \ + ((u16)(ALIGN((u32)((eq)->len * (eq)->entry_size), \ + (size)) / (size))) + +#define SSS_GET_EQ_MAX_PAGES(eq) \ + ((eq)->type == SSS_AEQ ? SSS_AEQ_MAX_PAGE : \ + SSS_CEQ_MAX_PAGE) + +#define SSS_GET_EQE_NUM(eq, pg_size) ((pg_size) / (u32)(eq)->entry_size) + +#define SSS_EQE_NUM_IS_ALIGN(eq) ((eq)->num_entry_per_pg & ((eq)->num_entry_per_pg - 1)) + +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state) +{ + u32 val; + + if (eq->qid != 0 && SSS_TO_HWDEV(eq)->poll) + arm_state = SSS_EQ_NOT_ARMED; + + val = SSS_SET_EQ_CI_SIMPLE_INDIR(arm_state, ARMED) | + SSS_SET_EQ_CI_SIMPLE_INDIR(SSS_EQ_CI(eq), CI); + + if (eq->type == SSS_AEQ) + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, AEQ_ID); + else + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, CEQ_ID); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq), val); +} + +static void sss_chip_set_eq_page_addr(struct sss_eq *eq, + u16 page_id, struct sss_dma_addr_align *dma_addr) +{ + u32 addr; + + addr = SSS_EQ_HI_PHYS_ADDR_REG(eq->type, page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + upper_32_bits(dma_addr->align_paddr)); + + addr = SSS_EQ_LO_PHYS_ADDR_REG(eq->type, page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + lower_32_bits(dma_addr->align_paddr)); +} + +static int sss_chip_init_eq_attr(struct sss_eq *eq) +{ + u32 i; + int ret; + + for (i = 0; i < eq->page_num; i++) + sss_chip_set_eq_page_addr(eq, i, &eq->page_array[i]); + + ret = eq->init_attr_handler(eq); + if (ret != 0) + return ret; + + sss_chip_set_eq_ci(eq, SSS_EQ_ARMED); + + return 0; +} + +static u32 sss_init_eqe_desc(struct sss_eq *eq) +{ + eq->num_entry_per_pg = SSS_GET_EQE_NUM(eq, eq->page_size); + if (SSS_EQE_NUM_IS_ALIGN(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number element in eq page is not align\n"); + return -EINVAL; + } + + eq->init_desc_handler(eq); + + return 0; +} + +static int sss_alloc_eq_dma_page(struct sss_eq *eq, u16 id) +{ + int ret; + + ret = sss_dma_zalloc_coherent_align(SSS_TO_HWDEV(eq)->dev_hdl, eq->page_size, + SSS_MIN_EQ_PAGE_SIZE, GFP_KERNEL, &eq->page_array[id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Alloc eq page fail, pg index: %u\n", id); + return ret; + } + + return 0; +} + +static void sss_free_eq_dma_page(struct sss_eq *eq, u16 max_id) +{ + int i; + + for (i = 0; i < max_id; i++) + sss_dma_free_coherent_align(SSS_TO_DEV(eq->hwdev), &eq->page_array[i]); +} + +static int sss_alloc_eq_page(struct sss_eq *eq) +{ + u16 page_id; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq); + + eq->page_array = kcalloc(eq->page_num, sizeof(*eq->page_array), GFP_KERNEL); + if (!eq->page_array) + return -ENOMEM; + + for (page_id = 0; page_id < eq->page_num; page_id++) { + ret = sss_alloc_eq_dma_page(eq, page_id); + if (ret != 0) + goto alloc_dma_err; + } + + ret = sss_init_eqe_desc(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eqe\n"); + goto alloc_dma_err; + } + + return 0; + +alloc_dma_err: + sss_free_eq_dma_page(eq, page_id); + kfree(eq->page_array); + eq->page_array = NULL; + + return ret; +} + +static void sss_free_eq_page(struct sss_eq *eq) +{ + u16 i; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq); + + for (i = 0; i < eq->page_num; i++) + sss_dma_free_coherent_align(hwdev->dev_hdl, &eq->page_array[i]); + + kfree(eq->page_array); + eq->page_array = NULL; +} + +static inline u32 sss_get_eq_page_size(const struct sss_eq *eq) +{ + u32 total_size; + u32 
count; + + total_size = ALIGN((eq->len * eq->entry_size), + SSS_MIN_EQ_PAGE_SIZE); + if (total_size <= (SSS_GET_EQ_MAX_PAGES(eq) * SSS_MIN_EQ_PAGE_SIZE)) + return SSS_MIN_EQ_PAGE_SIZE; + + count = (u32)(ALIGN((total_size / SSS_GET_EQ_MAX_PAGES(eq)), + SSS_MIN_EQ_PAGE_SIZE) / SSS_MIN_EQ_PAGE_SIZE); + + /* round up to nearest power of two */ + count = 1U << (u8)fls((int)(count - 1)); + + return ((u32)SSS_MIN_EQ_PAGE_SIZE) * count; +} + +static int sss_request_eq_irq(struct sss_eq *eq, struct sss_irq_desc *entry) +{ + struct pci_dev *pdev = SSS_TO_HWDEV(eq)->pcidev_hdl; + + snprintf(eq->irq_name, sizeof(eq->irq_name), "%s%u@pci:%s", + eq->name, eq->qid, pci_name(pdev)); + + return request_irq(entry->irq_id, eq->irq_handler, 0UL, eq->irq_name, eq); +} + +static void sss_chip_reset_eq(struct sss_eq *eq) +{ + struct sss_hwdev *hwdev = eq->hwdev; + struct sss_hwif *hwif = hwdev->hwif; + + sss_chip_write_reg(hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure set qid firstly*/ + wmb(); + + if (eq->type == SSS_AEQ) + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + else + sss_chip_set_ceq_attr(hwdev, eq->qid, 0, 0); + + /* make sure write ctrl reg secondly */ + wmb(); + + sss_chip_write_reg(hwif, SSS_EQ_PI_REG_ADDR(eq), 0); +} + +static int sss_init_eq_page_size(struct sss_eq *eq) +{ + eq->page_size = sss_get_eq_page_size(eq); + eq->old_page_size = eq->page_size; + eq->page_num = SSS_GET_EQ_PAGES_NUM(eq, eq->page_size); + + if (eq->page_num > SSS_GET_EQ_MAX_PAGES(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number pages: %u too many pages for eq\n", + eq->page_num); + return -EINVAL; + } + + return 0; +} + +void sss_increase_eq_ci(struct sss_eq *eq) +{ + if (!eq) + return; + + eq->ci++; + + if (eq->ci == eq->len) { + eq->ci = 0; + eq->wrap = !eq->wrap; + } +} + +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry) +{ + int ret = 0; + + eq->hwdev = hwdev; + eq->irq_desc.irq_id = entry->irq_id; + eq->irq_desc.msix_id = entry->msix_id; + + ret = sss_init_eq_page_size(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq params\n"); + return ret; + } + + ret = sss_alloc_eq_page(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc eq page\n"); + return ret; + } + + sss_chip_reset_eq(eq); + + ret = sss_chip_init_eq_attr(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq attr\n"); + goto out; + } + + ret = sss_request_eq_irq(eq, entry); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request eq irq, err: %d\n", ret); + goto out; + } + + sss_chip_set_msix_state(hwdev, SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + return 0; + +out: + sss_free_eq_page(eq); + return ret; +} + +void sss_deinit_eq(struct sss_eq *eq) +{ + struct sss_irq_desc *irq = &eq->irq_desc; + + sss_chip_set_msix_state(SSS_TO_HWDEV(eq), SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + synchronize_irq(irq->irq_id); + + free_irq(irq->irq_id, eq); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure disable msix */ + wmb(); + + if (eq->type == SSS_AEQ) { + cancel_work_sync(&eq->aeq_work); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + sss_chip_set_ceq_attr(SSS_TO_HWDEV(eq), eq->qid, 0, 0); + } + + eq->ci = sss_chip_read_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_PI_REG_ADDR(eq)); + sss_chip_set_eq_ci(eq, SSS_EQ_NOT_ARMED); + + sss_free_eq_page(eq); +} + +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info) +{ + 
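+	/* Fill in the default EQ interrupt moderation config (values from the SSS_EQ_INTR_* defines in sss_hwif_eq.h). */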
intr_info->coalesc_intr_set = SSS_EQ_INTR_COALESC; + intr_info->coalesc_timer = SSS_EQ_INTR_COALESC_TIMER_CFG; + intr_info->resend_timer = SSS_EQ_INTR_RESEND_TIMER_CFG; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h new file mode 100644 index 00000000000000..45db82abb497c8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EQ_H +#define SSS_HWIF_EQ_H + +#include +#include +#include + +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_csr.h" + +#define SSS_EQ_UPDATE_CI_STEP 64 + +#define SSS_TASK_PROCESS_EQE_LIMIT 1024 + +#define SSS_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define SSS_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define SSS_EQ_USLEEP_LOW_LIMIT 900 +#define SSS_EQ_USLEEP_HIG_LIMIT 1000 + +#define SSS_EQ_IRQ_ID(eq) ((eq)->irq_desc.msix_id) + +#define SSS_GET_EQ_ELEM(eq, id) \ + (((u8 *)(eq)->page_array[(id) / (eq)->num_entry_per_pg].align_vaddr) + \ + (u32)(((id) & ((eq)->num_entry_per_pg - 1)) * (eq)->entry_size)) + +#define SSS_EQ_VALID_SHIFT 31 +#define SSS_EQ_WRAPPED(eq) ((u32)(eq)->wrap << SSS_EQ_VALID_SHIFT) + +#define SSS_AEQ_MAX_PAGE 4 +#define SSS_CEQ_MAX_PAGE 8 + +#define SSS_AEQE_SIZE 64 +#define SSS_CEQE_SIZE 4 + +#define SSS_EQ_CI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_ADDR : SSS_CSR_CEQ_CI_ADDR) + +#define SSS_EQ_PI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_PI_ADDR : SSS_CSR_CEQ_PI_ADDR) + +#define SSS_EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define SSS_EQ_ELEM_DESC_TYPE_SHIFT 0 +#define SSS_EQ_ELEM_DESC_SRC_SHIFT 7 +#define SSS_EQ_ELEM_DESC_SIZE_SHIFT 8 +#define SSS_EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define SSS_EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define SSS_EQ_ELEM_DESC_SRC_MASK 0x1U +#define SSS_EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define SSS_EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define SSS_GET_EQE_DESC(val, member) \ + (((val) >> SSS_EQ_ELEM_DESC_##member##_SHIFT) & \ + SSS_EQ_ELEM_DESC_##member##_MASK) + +#define SSS_PAGE_IN_4K(page_size) ((page_size) >> 12) +#define SSS_SET_EQ_HW_PAGE_SIZE(eq) ((u32)ilog2(SSS_PAGE_IN_4K((eq)->page_size))) + +enum sss_eq_intr_mode { + SSS_INTR_MODE_ARMED, + SSS_INTR_MODE_ALWAY, +}; + +enum sss_eq_ci_arm_state { + SSS_EQ_NOT_ARMED, + SSS_EQ_ARMED, +}; + +#define SSS_EQ_ARM_STATE(unfinish) \ + ((unfinish) ? 
SSS_EQ_NOT_ARMED : SSS_EQ_ARMED) + +#define SSS_EQ_INTR_COALESC 1 +#define SSS_EQ_INTR_COALESC_TIMER_CFG 0xFF +#define SSS_EQ_INTR_RESEND_TIMER_CFG 7 + +void sss_increase_eq_ci(struct sss_eq *eq); +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry); +void sss_deinit_eq(struct sss_eq *eq); +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state); +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c new file mode 100644 index 00000000000000..e43a4124422ad1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_irq.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base) +{ + struct sss_hwif *hwif = NULL; + u32 id = 0; + + int ret; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = SSS_TO_HWIF(hwdev); + + ret = sss_alloc_db_id(hwif, &id); + if (ret != 0) + return -EFAULT; + + *db_base = hwif->db_base_vaddr + id * SSS_DB_PAGE_SIZE; + + return 0; +} +EXPORT_SYMBOL(sss_alloc_db_addr); + +void sss_free_db_addr(void *hwdev, const void __iomem *db_base) +{ + struct sss_hwif *hwif = NULL; + u32 id; + + if (!hwdev || !db_base) + return; + + hwif = SSS_TO_HWIF(hwdev); + id = SSS_DB_ID(db_base, hwif->db_base_vaddr); + + sss_free_db_id(hwif, id); +} +EXPORT_SYMBOL(sss_free_db_addr); + +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_CLR_MSIX_AUTO_MASK) ? + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_SET); + + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_auto_mask); + +void sss_chip_set_msix_state(void *hwdev, u16 msix_id, + enum sss_msix_state flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_MSIX_ENABLE) ? 
SSS_SET_MSI_CLR_INDIR(1, INT_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, INT_MSK_SET); + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_state); + +u16 sss_get_global_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_global_func_id); + +u8 sss_get_pf_id_of_vf(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pf_id_of_vf); + +u8 sss_get_pcie_itf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pcie_itf_id); + +enum sss_func_type sss_get_func_type(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_type); + +enum sss_func_type sss_get_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_ID((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_id); + +u16 sss_get_glb_pf_vf_offset(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_VF_OFFSET(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_glb_pf_vf_offset); + +u8 sss_get_ppf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PPF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_ppf_id); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c new file mode 100644 index 00000000000000..5451b85ffac886 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" + +#define SSS_WAIT_CHIP_READY_TIMEOUT 10000 + +#define SSS_WAIT_DB_READY_TIMEOUT 60000 + +#define SSS_MAX_MSIX_ENTRY 2048 + +#define SSS_AF0_FUNC_GLOBAL_ID_SHIFT 0 +#define SSS_AF0_PF_ID_SHIFT 12 +#define SSS_AF0_PCI_INTF_ID_SHIFT 17 +#define SSS_AF0_VF_IN_PF_SHIFT 20 +#define SSS_AF0_FUNC_TYPE_SHIFT 28 + +#define SSS_AF0_FUNC_GLOBAL_ID_MASK 0xFFF +#define SSS_AF0_PF_ID_MASK 0x1F +#define SSS_AF0_PCI_INTF_ID_MASK 0x7 +#define SSS_AF0_VF_IN_PF_MASK 0xFF +#define SSS_AF0_FUNC_TYPE_MASK 0x1 + +#define SSS_GET_AF0(val, member) \ + (((val) >> SSS_AF0_##member##_SHIFT) & SSS_AF0_##member##_MASK) + +#define SSS_AF2_CEQ_PER_FUNC_SHIFT 0 +#define SSS_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define SSS_AF2_IRQ_PER_FUNC_SHIFT 16 + +#define SSS_AF2_CEQ_PER_FUNC_MASK 0x1FF +#define SSS_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define SSS_AF2_IRQ_PER_FUNC_MASK 0x7FF + +#define SSS_GET_AF2(val, member) \ + (((val) >> SSS_AF2_##member##_SHIFT) & SSS_AF2_##member##_MASK) + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define SSS_GET_AF3(val, member) \ + (((val) >> SSS_AF3_##member##_SHIFT) & SSS_AF3_##member##_MASK) + +#define SSS_AF5_OUTBOUND_CTRL_SHIFT 0 +#define SSS_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define SSS_GET_AF5(val, member) \ + (((val) >> SSS_AF5_##member##_SHIFT) & SSS_AF5_##member##_MASK) + +#define SSS_SET_AF5(val, member) \ + (((val) & 
SSS_AF5_##member##_MASK) << SSS_AF5_##member##_SHIFT) + +#define SSS_CLEAR_AF5(val, member) \ + ((val) & (~(SSS_AF5_##member##_MASK << SSS_AF5_##member##_SHIFT))) + +#define SSS_MPF_ELECTION_ID_SHIFT 0 + +#define SSS_MPF_ELECTION_ID_MASK 0x1F + +#define SSS_SET_MPF(val, member) \ + (((val) & SSS_MPF_ELECTION_##member##_MASK) << \ + SSS_MPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_MPF(val, member) \ + (((val) >> SSS_MPF_ELECTION_##member##_SHIFT) & \ + SSS_MPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_MPF(val, member) \ + ((val) & (~(SSS_MPF_ELECTION_##member##_MASK << \ + SSS_MPF_ELECTION_##member##_SHIFT))) + +static enum sss_process_ret sss_check_pcie_link_handle(void *data) +{ + u32 status; + + status = sss_chip_get_pcie_link_status(data); + if (status == SSS_PCIE_LINK_DOWN) + return SSS_PROCESS_ERR; + else if (status == SSS_PCIE_LINK_UP) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_pcie_link_up(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_check_handler_timeout(hwdev, sss_check_pcie_link_handle, + SSS_WAIT_CHIP_READY_TIMEOUT, USEC_PER_MSEC); + if (ret == -ETIMEDOUT) + sdk_err(hwdev->dev_hdl, "Wait for chip ready timeout\n"); + + return ret; +} + +static int sss_chip_get_func_attr0(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR0_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_ID(hwif, SSS_GET_AF0(attr, FUNC_GLOBAL_ID)); + SSS_SET_HWIF_PF_ID(hwif, SSS_GET_AF0(attr, PF_ID)); + SSS_SET_HWIF_PCI_INTF_ID(hwif, SSS_GET_AF0(attr, PCI_INTF_ID)); + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_GET_AF0(attr, FUNC_TYPE)); + + return 0; +} + +static int sss_chip_get_func_attr1(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR1_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_AF1(attr, PPF_ID)); + SSS_SET_HWIF_AEQ_NUM(hwif, BIT(SSS_GET_AF1(attr, AEQ_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr2(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR2_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_CEQ_NUM(hwif, (u8)SSS_GET_AF2(attr, CEQ_PER_FUNC)); + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_GET_AF2(attr, IRQ_PER_FUNC)); + if (SSS_GET_HWIF_IRQ_NUM(hwif) > SSS_MAX_MSIX_ENTRY) + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_MAX_MSIX_ENTRY); + SSS_SET_HWIF_DMA_ATTR_NUM(hwif, BIT(SSS_GET_AF2(attr, DMA_ATTR_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr3(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR3_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, SSS_GET_AF3(attr, GLOBAL_VF_ID_OF_PF)); + + return 0; +} + +static int sss_chip_get_func_attr6(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_SQ_NUM(hwif, SSS_GET_AF6(attr, FUNC_MAX_SQ)); + SSS_SET_HWIF_MSIX_EN(hwif, SSS_GET_AF6(attr, MSIX_FLEX_EN)); + + return 0; +} + +static int sss_hwif_init_func_attr(struct sss_hwif *hwif) +{ + int ret; + + ret = sss_chip_get_func_attr0(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr1(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr2(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr3(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr6(hwif); + if (ret != 0) + return ret; + + return 0; +} + +static 
void sss_chip_init_ppf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + val = SSS_CLEAR_PPF(val, ID); + val |= SSS_SET_PPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_PPF_ELECT_ADDR, val); + + /* Check PPF */ + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_PPF(val, ID)); + if (SSS_GET_HWIF_PPF_ID(hwif) == SSS_GET_HWIF_GLOBAL_ID(hwif)) + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_FUNC_TYPE_PPF); +} + +static void sss_chip_get_mpf(struct sss_hwif *hwif) +{ + u32 mpf; + + mpf = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + SSS_SET_HWIF_MPF_ID(hwif, SSS_GET_MPF(mpf, ID)); +} + +static void sss_chip_init_mpf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + val = SSS_CLEAR_MPF(val, ID); + val |= SSS_SET_MPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR, val); +} + +static int sss_hwif_alloc_db_pool(struct sss_hwif *hwif) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 bit_size; + + bit_size = (hwif->db_dwqe_len > SSS_DB_DWQE_SIZE) ? SSS_DB_MAX_AREAS : + ((u32)(hwif->db_dwqe_len / SSS_DB_PAGE_SIZE)); + pool->bitmap = bitmap_zalloc(bit_size, GFP_KERNEL); + if (!pool->bitmap) { + pr_err("Fail to allocate db area.\n"); + return -ENOMEM; + } + pool->bit_size = bit_size; + spin_lock_init(&pool->id_lock); + + return 0; +} + +static void sss_hwif_free_db_pool(struct sss_db_pool *pool) +{ + kfree(pool->bitmap); +} + +static void sss_chip_disable_all_msix(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + + for (i = 0; i < irq_num; i++) + sss_chip_set_msix_state(hwdev, i, SSS_MSIX_DISABLE); +} + +static enum sss_process_ret sss_chip_check_db_ready(void *data) +{ + int outbound_status; + int db_status; + struct sss_hwif *hwif = data; + u32 db_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR); + u32 outband_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR5_ADDR); + + db_status = SSS_GET_AF4(db_attr, DOORBELL_CTRL); + outbound_status = SSS_GET_AF5(outband_attr, OUTBOUND_CTRL); + + if (db_status == DB_ENABLE && outbound_status == OUTBOUND_ENABLE) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_db_ready(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_chip_check_db_ready, + SSS_WAIT_DB_READY_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_hwif_init_bar_base(struct sss_pci_adapter *adapter) +{ + struct sss_hwif *hwif = SSS_TO_HWIF(adapter->hwdev); + + hwif->db_dwqe_len = adapter->db_dwqe_len; + hwif->db_base_vaddr = adapter->db_reg_bar; + hwif->db_base_paddr = adapter->db_base_paddr; + + hwif->mgmt_reg_base = adapter->mgmt_reg_bar; + hwif->cfg_reg_base = (adapter->mgmt_reg_bar) ? 
+ adapter->cfg_reg_bar : + ((u8 *)adapter->cfg_reg_bar + SSS_VF_CFG_REG_OFFSET); +} + +static int sss_hwif_wait_chip_ready(struct sss_hwdev *hwdev) +{ + int ret; + u32 db_attr; + u32 outband_attr; + + ret = sss_wait_pcie_link_up(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Pcie is not link up\n"); + return ret; + } + + ret = sss_wait_db_ready(hwdev->hwif); + if (ret != 0) { + db_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR4_ADDR); + outband_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell is disabled, db 0x%x outbound 0x%x\n", + db_attr, outband_attr); + return ret; + } + + return 0; +} + +static void sss_hwif_init_pf(struct sss_hwdev *hwdev) +{ + struct sss_hwif *hwif = hwdev->hwif; + + if (!SSS_IS_VF(hwdev)) { + sss_chip_init_ppf(hwif); + + if (SSS_IS_PPF(hwdev)) + sss_chip_init_mpf(hwif); + sss_chip_get_mpf(hwif); + } + + sss_chip_disable_all_msix(hwdev); + + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, + "Global_func_id: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + SSS_GET_HWIF_GLOBAL_ID(hwif), SSS_GET_HWIF_FUNC_TYPE(hwif), + SSS_GET_HWIF_PCI_INTF_ID(hwif), SSS_GET_HWIF_PPF_ID(hwif), + SSS_GET_HWIF_MPF_ID(hwif)); +} + +int sss_hwif_init(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev = adapter->hwdev; + struct sss_hwif *hwif = NULL; + int ret; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwif->pdev = hwdev->pcidev_hdl; + hwdev->hwif = hwif; + + sss_hwif_init_bar_base(adapter); + + ret = sss_hwif_alloc_db_pool(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init db pool.\n"); + goto alloc_db_pool_err; + } + + ret = sss_hwif_wait_chip_ready(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Chip is not ready\n"); + goto wait_chip_ready_err; + } + + ret = sss_hwif_init_func_attr(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail init hwif attr\n"); + goto wait_chip_ready_err; + } + + sss_hwif_init_pf(hwdev); + + return 0; + +wait_chip_ready_err: + sss_dump_chip_err_info(hwdev); + sss_hwif_free_db_pool(&hwif->db_pool); +alloc_db_pool_err: + kfree(hwif); + hwdev->hwif = NULL; + + return ret; +} + +void sss_hwif_deinit(struct sss_hwdev *hwdev) +{ + sss_hwif_free_db_pool(&hwdev->hwif->db_pool); + kfree(hwdev->hwif); + hwdev->hwif = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h new file mode 100644 index 00000000000000..ca5e2ce972e554 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_INIT_H +#define SSS_HWIF_INIT_H + +#include "sss_hwdev.h" +#include "sss_adapter.h" + +int sss_hwif_init(struct sss_pci_adapter *adapter); +void sss_hwif_deinit(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c new file mode 100644 index 00000000000000..efed8438ec3dbc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_svc_cap.h" +#include "sss_hwif_irq.h" + +#define SSS_GET_NEED_IRQ_NUM(hwif, intr_num) \ + 
(SSS_GET_HWIF_MSIX_EN(hwif) ? (SSS_GET_HWIF_AEQ_NUM(hwif) + \ + SSS_GET_HWIF_CEQ_NUM(hwif) + (hwif)->attr.sq_num) : (intr_num)) + +#define SSS_MIN_VECTOR 2 + +static int sss_alloc_irq_info(struct sss_hwdev *hwdev) +{ + u16 total_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + u16 need_num = SSS_GET_NEED_IRQ_NUM(hwdev->hwif, total_num); + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq_info *irq_info = &mgmt_info->irq_info; + + if (total_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt irq info: intr total_num = 0, msix_flex_en %d\n", + SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + return -EFAULT; + } + + if (need_num > total_num) { + sdk_warn(hwdev->dev_hdl, "Mgmt irq info: intr total_num %d < need_num %d, msix_flex_en %d\n", + total_num, need_num, SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + need_num = total_num; + } + + irq_info->irq = kcalloc(total_num, sizeof(*irq_info->irq), GFP_KERNEL); + if (!irq_info->irq) + return -ENOMEM; + + irq_info->max_num = need_num; + + return 0; +} + +static void sss_free_irq_info(struct sss_hwdev *hwdev) +{ + kfree(hwdev->mgmt_info->irq_info.irq); + hwdev->mgmt_info->irq_info.irq = NULL; +} + +int sss_init_irq_info(struct sss_hwdev *hwdev) +{ + u16 i = 0; + u16 irq_num; + int enable_irq_num; + int ret; + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq *irq = NULL; + struct msix_entry *entry = NULL; + + ret = sss_alloc_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq info, err: %d\n", ret); + return ret; + } + + irq_num = mgmt_info->irq_info.max_num; + entry = kcalloc(irq_num, sizeof(*entry), GFP_KERNEL); + if (!entry) { + sss_free_irq_info(hwdev); + return -ENOMEM; + } + + for (i = 0; i < irq_num; i++) + entry[i].entry = i; + + enable_irq_num = pci_enable_msix_range(hwdev->pcidev_hdl, entry, + SSS_MIN_VECTOR, irq_num); + if (enable_irq_num < 0) { + kfree(entry); + sss_free_irq_info(hwdev); + sdk_err(hwdev->dev_hdl, "Fail to alloc msix entries with threshold 2. 
enabled_irq: %d\n", + enable_irq_num); + return -ENOMEM; + } + + irq_num = (u16)enable_irq_num; + mgmt_info->irq_info.total_num = irq_num; + mgmt_info->irq_info.free_num = irq_num; + mgmt_info->svc_cap.intr_type = SSS_INTR_TYPE_MSIX; + + irq = mgmt_info->irq_info.irq; + for (i = 0; i < irq_num; i++) { + irq[i].desc.msix_id = entry[i].entry; + irq[i].desc.irq_id = entry[i].vector; + irq[i].type = SSS_SERVICE_TYPE_MAX; + irq[i].busy = SSS_CFG_FREE; + } + + mutex_init(&mgmt_info->irq_info.irq_mutex); + + sdk_info(hwdev->dev_hdl, "Success to request %u msix vector.\n", irq_num); + kfree(entry); + + return 0; +} + +void sss_deinit_irq_info(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + struct sss_irq_info *irq_info = &hwdev->mgmt_info->irq_info; + + if (irq_info->free_num != irq_info->total_num) + sdk_err(hwdev->dev_hdl, "Fail to reclaim all irq and eq, please check\n"); + + if (svc_cap->intr_type == SSS_INTR_TYPE_MSIX) + pci_disable_msix(hwdev->pcidev_hdl); + else if (svc_cap->intr_type == SSS_INTR_TYPE_MSI) + pci_disable_msi(hwdev->pcidev_hdl); + + sss_free_irq_info(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h new file mode 100644 index 00000000000000..0918d74ebaa011 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_IRQ_H +#define SSS_HWIF_IRQ_H + +#include "sss_hwdev.h" + +int sss_init_irq_info(struct sss_hwdev *dev); +void sss_deinit_irq_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c new file mode 100644 index 00000000000000..4490e4378cbca6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_aeq.h" +#include "sss_csr.h" +#include "sss_common.h" + +#define SSS_MBX_INT_DST_AEQN_SHIFT 10 +#define SSS_MBX_INT_SRC_RESP_AEQN_SHIFT 12 +#define SSS_MBX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define SSS_MBX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define SSS_MBX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define SSS_MBX_INT_WB_EN_SHIFT 28 + +#define SSS_MBX_INT_DST_AEQN_MASK 0x3 +#define SSS_MBX_INT_SRC_RESP_AEQN_MASK 0x3 +#define SSS_MBX_INT_STAT_DMA_MASK 0x3F +#define SSS_MBX_INT_TX_SIZE_MASK 0x1F +#define SSS_MBX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define SSS_MBX_INT_WB_EN_MASK 0x1 + +#define SSS_SET_MBX_INT(val, field) \ + (((val) & SSS_MBX_INT_##field##_MASK) << \ + SSS_MBX_INT_##field##_SHIFT) + +enum sss_mbx_tx_status { + SSS_MBX_TX_NOT_COMPLETE = 1, +}; + +#define SSS_MBX_CTRL_TRIGGER_AEQE_SHIFT 0 + +#define SSS_MBX_CTRL_TX_STATUS_SHIFT 1 +#define SSS_MBX_CTRL_DST_FUNC_SHIFT 16 + +#define SSS_MBX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define SSS_MBX_CTRL_TX_STATUS_MASK 0x1 +#define SSS_MBX_CTRL_DST_FUNC_MASK 0x1FFF + +#define SSS_SET_MBX_CTRL(val, field) \ + (((val) & SSS_MBX_CTRL_##field##_MASK) << \ + SSS_MBX_CTRL_##field##_SHIFT) + +#define 
SSS_MBX_SEGLEN_MASK \ + SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define SSS_MBX_MSG_POLL_TIMEOUT_MS 8000 +#define SSS_MBX_COMPLETE_WAIT_TIME_MS 40000U + +#define SSS_SEQ_ID_START_VAL 0 + +/* mbx write back status is 16B, only first 4B is used */ +#define SSS_MBX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define SSS_MBX_WB_STATUS_MASK 0xFF +#define SSS_MBX_WB_ERRCODE_MASK 0xFF00 +#define SSS_MBX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define SSS_MBX_WB_STATUS_NOT_FINISHED 0x00 + +#define SSS_MBX_STATUS_FINISHED(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) != SSS_MBX_WB_STATUS_NOT_FINISHED) +#define SSS_MBX_STATUS_SUCCESS(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) == SSS_MBX_WB_STATUS_FINISHED_SUCCESS) +#define SSS_MBX_STATUS_ERRCODE(wb) \ + ((wb) & SSS_MBX_WB_ERRCODE_MASK) + +#define SSS_NO_DMA_ATTR 0 + +#define SSS_MBX_MSG_ID_MASK 0xF +#define SSS_MBX_MSG_ID(mbx) ((mbx)->send_msg_id) +#define SSS_INCREASE_MBX_MSG_ID(mbx) \ + ((mbx)->send_msg_id = ((mbx)->send_msg_id + 1) & SSS_MBX_MSG_ID_MASK) + +#define SSS_MBX_MSG_CHN_STOP(mbx) \ + ((((mbx)->lock_channel_en) && \ + test_bit((mbx)->cur_msg_channel, &(mbx)->channel_stop)) ? true : false) + +#define SSS_MBX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define SSS_MBX_XOR_DATA_ALIGN 4 + +#define SSS_MQ_ID_MASK(mq, id) ((id) & ((mq)->depth - 1)) +#define SSS_IS_MSG_QUEUE_FULL(mq) \ + (SSS_MQ_ID_MASK(mq, (mq)->pi + 1) == SSS_MQ_ID_MASK(mq, (mq)->ci)) + +#define SSS_MBX_TRY_LOCK_SLEPP_US 1000 + +#define SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, ack_type, type, direction, cmd) \ + (SSS_SET_MSG_HEADER((msg_len), MSG_LEN) | \ + SSS_SET_MSG_HEADER((mod), MODULE) | \ + SSS_SET_MSG_HEADER(SSS_MBX_SEG_SIZE, SEG_LEN) | \ + SSS_SET_MSG_HEADER((ack_type), NO_ACK) | \ + SSS_SET_MSG_HEADER((type), DATA_TYPE) | \ + SSS_SET_MSG_HEADER(SSS_SEQ_ID_START_VAL, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_NOT_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER((direction), DIRECTION) | \ + SSS_SET_MSG_HEADER((cmd), CMD) | \ + SSS_SET_MSG_HEADER((msg_info)->msg_id, MSG_ID) | \ + SSS_SET_MSG_HEADER((((hwdev)->poll || \ + (hwdev)->hwif->attr.aeq_num >= SSS_MGMT_RSP_MSG_AEQ) ? 
\ + SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ), AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MBX, SOURCE) | \ + SSS_SET_MSG_HEADER(!!(msg_info)->state, STATUS) | \ + SSS_SET_MSG_HEADER(sss_get_global_func_id(hwdev), SRC_GLB_FUNC_ID)) + +#define SSS_MBX_SEG_LEN_ALIGN 4 + +enum sss_msg_aeq_type { + SSS_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbx message */ + SSS_MBX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response adm message */ + SSS_MGMT_RSP_MSG_AEQ = 2, +}; + +enum sss_mbx_order_type { + SSS_MBX_STRONG_ORDER, +}; + +enum sss_mbx_wb_type { + SSS_MBX_WB = 1, +}; + +enum sss_mbx_aeq_trig_type { + SSS_MBX_NOT_TRIG, +}; + +struct sss_mbx_dma_msg { + u32 xor; + u32 dma_addr_h; + u32 dma_addr_l; + u32 msg_len; + u64 rsvd; +}; + +static struct sss_msg_buffer *sss_get_msg_buffer_from_mgmt(struct sss_mbx *mbx) +{ + return &mbx->mgmt_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_pf(struct sss_mbx *mbx, u64 src_func_id) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (src_func_id != sss_get_pf_id_of_vf(hwdev) || !mbx->func_msg) + return NULL; + + return mbx->func_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_vf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + func_id = (u16)(src_func_id - 1U) - sss_get_glb_pf_vf_offset(hwdev); + if (func_id >= mbx->num_func_msg) + return NULL; + + return &mbx->func_msg[func_id]; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_ppf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (!mbx->support_h2h_msg) + return NULL; + + for (func_id = 0; func_id < SSS_MAX_HOST_NUM(hwdev); func_id++) { + if (src_func_id == sss_chip_get_host_ppf_id(hwdev, (u8)func_id)) + break; + } + + if (func_id == SSS_MAX_HOST_NUM(hwdev) || !mbx->host_msg) + return NULL; + + return &mbx->host_msg[func_id]; +} + +struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_msg_buffer *msg_buffer = NULL; + + if (src_func_id == SSS_MGMT_SRC_ID) + msg_buffer = sss_get_msg_buffer_from_mgmt(mbx); + else if (SSS_IS_VF(hwdev)) + msg_buffer = sss_get_msg_buffer_from_pf(mbx, src_func_id); + else if (src_func_id > sss_get_glb_pf_vf_offset(hwdev)) + msg_buffer = sss_get_msg_buffer_from_vf(mbx, src_func_id); + else + msg_buffer = sss_get_msg_buffer_from_ppf(mbx, src_func_id); + + return (direction == SSS_DIRECT_SEND_MSG) ? 
+ &msg_buffer->recv_msg : &msg_buffer->resp_msg; +} + +static u32 sss_mbx_dma_data_xor(u32 *data, u16 data_len) +{ + u16 i; + u16 cnt = data_len / sizeof(u32); + u32 val = SSS_MBX_DMA_MSG_INIT_XOR_VAL; + + for (i = 0; i < cnt; i++) + val ^= data[i]; + + return val; +} + +static void sss_mbx_fill_dma_msg_buf(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, + void *data, u16 data_len) +{ + u64 pi; + u64 dma_paddr; + void *dma_vaddr; + + pi = queue->pi * SSS_MBX_BUF_SIZE_MAX; + dma_vaddr = (u8 *)queue->dma_buff_vaddr + pi; + dma_paddr = queue->dma_buff_paddr + pi; + memcpy(dma_vaddr, data, data_len); + + dma_msg->dma_addr_h = upper_32_bits(dma_paddr); + dma_msg->dma_addr_l = lower_32_bits(dma_paddr); + dma_msg->msg_len = data_len; + dma_msg->xor = sss_mbx_dma_data_xor(dma_vaddr, + ALIGN(data_len, SSS_MBX_XOR_DATA_ALIGN)); +} + +static struct sss_mbx_dma_queue * +sss_get_mbx_dma_queue(struct sss_mbx *mbx, + enum sss_msg_ack_type ack_type) +{ + u32 val; + struct sss_mbx_dma_queue *queue = NULL; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + if (ack_type == SSS_MSG_ACK) { + queue = &mbx->sync_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, SYNC); + } else { + queue = &mbx->async_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, ASYNC); + } + + if (SSS_IS_MSG_QUEUE_FULL(queue)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Mbx sync mq is busy, pi: %u, ci: %u\n", + queue->pi, SSS_MQ_ID_MASK(queue, queue->ci)); + return NULL; + } + + return queue; +} + +static void sss_fill_mbx_msg_body(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, void *msg_body, u16 body_len) +{ + sss_mbx_fill_dma_msg_buf(queue, dma_msg, msg_body, body_len); + queue->pi = SSS_MQ_ID_MASK(queue, queue->pi + 1); +} + +static void sss_clear_mbx_status(struct sss_mbx_send *send_mbx) +{ + *send_mbx->wb_state = 0; + + /* clear mbx wb state */ + wmb(); +} + +static void sss_chip_send_mbx_msg_header(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, u64 *msg_header) +{ + u32 i; + u32 *header = (u32 *)msg_header; + u32 cnt = SSS_MBX_HEADER_SIZE / sizeof(u32); + + for (i = 0; i < cnt; i++) + __raw_writel(cpu_to_be32(*(header + i)), send_mbx->data + i * sizeof(u32)); +} + +static void sss_chip_send_mbx_msg_body(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, void *body, u16 body_len) +{ + u32 *msg_data = body; + u32 size = sizeof(u32); + u32 i; + u8 buf[SSS_MBX_SEG_SIZE] = {0}; + u32 cnt = ALIGN(body_len, size) / size; + + if (body_len % size != 0) { + memcpy(buf, body, body_len); + msg_data = (u32 *)buf; + } + + for (i = 0; i < cnt; i++) { + __raw_writel(cpu_to_be32(*(msg_data + i)), + send_mbx->data + SSS_MBX_HEADER_SIZE + i * size); + } +} + +static void sss_chip_write_mbx_msg_attr(struct sss_mbx *mbx, + u16 dest, u16 aeq_num, u16 seg_len) +{ + u16 size; + u16 dest_func_id; + u32 intr; + u32 ctrl; + + size = ALIGN(seg_len + SSS_MBX_HEADER_SIZE, SSS_MBX_SEG_LEN_ALIGN) >> 2; + intr = SSS_SET_MBX_INT(aeq_num, DST_AEQN) | + SSS_SET_MBX_INT(0, SRC_RESP_AEQN) | + SSS_SET_MBX_INT(SSS_NO_DMA_ATTR, STAT_DMA) | + SSS_SET_MBX_INT(size, TX_SIZE) | + SSS_SET_MBX_INT(SSS_MBX_STRONG_ORDER, STAT_DMA_SO_RO) | + SSS_SET_MBX_INT(SSS_MBX_WB, WB_EN); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_INT_OFFSET_OFF, intr); + + /* make sure write mbx intr attr reg */ + wmb(); + + dest_func_id = (SSS_IS_VF(SSS_TO_HWDEV(mbx)) && dest != SSS_MGMT_SRC_ID) ? 
0 : dest; + ctrl = SSS_SET_MBX_CTRL(SSS_MBX_TX_NOT_COMPLETE, TX_STATUS) | + SSS_SET_MBX_CTRL(SSS_MBX_NOT_TRIG, TRIGGER_AEQE) | + SSS_SET_MBX_CTRL(dest_func_id, DST_FUNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_CTRL_OFF, ctrl); + + /* make sure write mbx ctrl reg */ + wmb(); +} + +static void sss_dump_mbx_reg(struct sss_hwdev *hwdev) +{ + u32 val1; + u32 val2; + + val1 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_CTRL_OFF); + val2 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_INT_OFFSET_OFF); + + sdk_err(hwdev->dev_hdl, "Mbx ctrl reg:0x%x, intr offset:0x%x\n", val1, val2); +} + +static u16 sss_get_mbx_status(const struct sss_mbx_send *send_mbx) +{ + u64 val = be64_to_cpu(*send_mbx->wb_state); + + /* read wb state before returning it */ + rmb(); + + return (u16)(val & SSS_MBX_WB_STATUS_ERRCODE_MASK); +} + +static enum sss_process_ret sss_check_mbx_wb_status(void *priv_data) +{ + u16 status; + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || !SSS_TO_HWDEV(mbx)->chip_present_flag) + return SSS_PROCESS_ERR; + + status = sss_get_mbx_status(&mbx->mbx_send); + + return SSS_MBX_STATUS_FINISHED(status) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_chip_send_mbx_fragment(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, void *msg_body, u16 body_len) +{ + u16 aeq_type; + u16 status = 0; + u16 err_code; + u16 direction; + int ret; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + aeq_type = (SSS_GET_HWIF_AEQ_NUM(hwdev->hwif) > SSS_MBX_RSP_MSG_AEQ && + direction != SSS_DIRECT_SEND_MSG) ? SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ; + + sss_clear_mbx_status(send_mbx); + + sss_chip_send_mbx_msg_header(hwdev, send_mbx, &msg_header); + + sss_chip_send_mbx_msg_body(hwdev, send_mbx, msg_body, body_len); + + sss_chip_write_mbx_msg_attr(mbx, dest_func_id, aeq_type, body_len); + + ret = sss_check_handler_timeout(mbx, sss_check_mbx_wb_status, + SSS_MBX_MSG_POLL_TIMEOUT_MS, USEC_PER_MSEC); + status = sss_get_mbx_status(send_mbx); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Send mbx seg timeout, wb status: 0x%x\n", status); + sss_dump_mbx_reg(hwdev); + return -ETIMEDOUT; + } + + if (!SSS_MBX_STATUS_SUCCESS(status)) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg to func %u, wb status: 0x%x\n", + dest_func_id, status); + err_code = SSS_MBX_STATUS_ERRCODE(status); + return (err_code != 0) ? 
err_code : -EFAULT; + } + + return 0; +} + +static int sss_send_mbx_to_chip(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, u8 *msg_body, u16 body_len) +{ + int ret; + u16 seg_len = SSS_MBX_SEG_SIZE; + u32 seq_id = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + while (body_len > 0) { + if (body_len <= SSS_MBX_SEG_SIZE) { + msg_header &= ~SSS_MBX_SEGLEN_MASK; + msg_header |= SSS_SET_MSG_HEADER(body_len, SEG_LEN); + msg_header |= SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST); + seg_len = body_len; + } + + ret = sss_chip_send_mbx_fragment(mbx, dest_func_id, msg_header, msg_body, seg_len); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg, seq_id=0x%llx\n", + SSS_GET_MSG_HEADER(msg_header, SEQID)); + return ret; + } + + seq_id++; + msg_body += seg_len; + body_len -= seg_len; + msg_header &= ~(SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEQID_MASK, SEQID)); + msg_header |= SSS_SET_MSG_HEADER(seq_id, SEQID); + } + + return 0; +} + +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, void *msg, + u16 msg_len, u16 dest_func_id, enum sss_msg_direction_type direction, + enum sss_msg_ack_type ack_type, struct sss_mbx_msg_info *msg_info) +{ + u8 *msg_body = NULL; + u64 msg_header = 0; + int ret = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_mbx_dma_msg msg_dma = {0}; + enum sss_data_type type = SSS_INLINE_DATA; + struct sss_mbx_dma_queue *queue = NULL; + + mutex_lock(&mbx->msg_send_lock); + + if (SSS_IS_DMA_MBX_MSG(dest_func_id) && !SSS_SUPPORT_MBX_SEGMENT(hwdev)) { + queue = sss_get_mbx_dma_queue(mbx, ack_type); + if (!queue) { + ret = -EBUSY; + goto out; + } + + sss_fill_mbx_msg_body(queue, &msg_dma, msg, msg_len); + + type = SSS_DMA_DATA; + msg = &msg_dma; + msg_len = sizeof(msg_dma); + } + + msg_body = (u8 *)msg; + msg_header = SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, + ack_type, type, direction, cmd); + + ret = sss_send_mbx_to_chip(mbx, dest_func_id, msg_header, msg_body, msg_len); + +out: + mutex_unlock(&mbx->msg_send_lock); + + return ret; +} + +static void sss_set_mbx_event_flag(struct sss_mbx *mbx, + enum sss_mbx_event_state event_flag) +{ + spin_lock(&mbx->mbx_lock); + mbx->event_flag = event_flag; + spin_unlock(&mbx->mbx_lock); +} + +static enum sss_process_ret check_mbx_msg_finish(void *priv_data) +{ + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return SSS_PROCESS_ERR; + + return (mbx->event_flag == SSS_EVENT_SUCCESS) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_wait_mbx_msg_completion(struct sss_mbx *mbx, u32 timeout) +{ + u32 wait_time; + int ret; + + wait_time = (timeout != 0) ? 
timeout : SSS_MBX_COMPLETE_WAIT_TIME_MS; + ret = sss_check_handler_timeout(mbx, check_mbx_msg_finish, + wait_time, USEC_PER_MSEC); + if (ret != 0) { + sss_set_mbx_event_flag(mbx, SSS_EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + sss_set_mbx_event_flag(mbx, SSS_EVENT_END); + + return 0; +} + +static int sss_send_mbx_msg_lock(struct sss_mbx *mbx, u16 channel) +{ + if (!mbx->lock_channel_en) { + mutex_lock(&mbx->mbx_send_lock); + return 0; + } + + while (test_bit(channel, &mbx->channel_stop) == 0) { + if (mutex_trylock(&mbx->mbx_send_lock) != 0) + return 0; + + usleep_range(SSS_MBX_TRY_LOCK_SLEPP_US - 1, SSS_MBX_TRY_LOCK_SLEPP_US); + } + + return -EAGAIN; +} + +static void sss_send_mbx_msg_unlock(struct sss_mbx *mbx) +{ + mutex_unlock(&mbx->mbx_send_lock); +} + +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_msg_desc *msg_desc = NULL; + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + if (SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return -EPERM; + + msg_desc = sss_get_mbx_msg_desc(mbx, dest_func_id, SSS_RESP_MSG); + if (!msg_desc) + return -EFAULT; + + ret = sss_send_mbx_msg_lock(mbx, channel); + if (ret != 0) + return ret; + + mbx->cur_msg_channel = channel; + SSS_INCREASE_MBX_MSG_ID(mbx); + sss_set_mbx_event_flag(mbx, SSS_EVENT_START); + + msg_info.msg_id = SSS_MBX_MSG_ID(mbx); + ret = sss_send_mbx_msg(mbx, mod, cmd, buf_in, in_size, dest_func_id, + SSS_DIRECT_SEND_MSG, SSS_MSG_ACK, &msg_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to send mbx mod %u, cmd %u, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, ret); + sss_set_mbx_event_flag(mbx, SSS_EVENT_FAIL); + goto send_err; + } + + if (sss_wait_mbx_msg_completion(mbx, timeout)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Send mbx msg timeout, msg_id: %u\n", msg_info.msg_id); + sss_dump_aeq_info(SSS_TO_HWDEV(mbx)); + ret = -ETIMEDOUT; + goto send_err; + } + + if (mod != msg_desc->mod || cmd != msg_desc->cmd) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_info.state) { + ret = msg_desc->msg_info.state; + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < msg_desc->msg_len) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message length: %u for mod %d cmd %u, should less than: %u\n", + msg_desc->msg_len, mod, cmd, *out_size); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_len) + memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); + + *out_size = msg_desc->msg_len; + } + +send_err: + sss_send_mbx_msg_unlock(mbx); + + return ret; +} + +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) +{ + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + ret = sss_check_mbx_param(hwdev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg_lock(hwdev->mbx, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + func_id, SSS_DIRECT_SEND_MSG, SSS_MSG_NO_ACK, &msg_info); + if (ret != 0) + sdk_err(hwdev->dev_hdl, "Fail to send mbx no ack\n"); + + sss_send_mbx_msg_unlock(hwdev->mbx); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h new file mode 100644 index 00000000000000..f3f253046f8fc4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_H +#define SSS_HWIF_MBX_H + +#include "sss_hw.h" +#include "sss_hwdev.h" + +#define SSS_MGMT_SRC_ID 0x1FFF + +#define SSS_IS_DMA_MBX_MSG(dest_func_id) ((dest_func_id) == SSS_MGMT_SRC_ID) + +#define SSS_MBX_BUF_SIZE_MAX 2048U + +#define SSS_MBX_HEADER_SIZE 8 + +/* MBX size is 64B, 8B for mbx_header, 8B reserved */ +#define SSS_MBX_SEG_SIZE 48 +#define SSS_MBX_DATA_SIZE (SSS_MBX_BUF_SIZE_MAX - SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_MQ_CI_OFF (SSS_CSR_CFG_FLAG + \ + SSS_HW_CSR_MBX_DATA_OFF + SSS_MBX_HEADER_SIZE + SSS_MBX_SEG_SIZE) + +#define SSS_MBX_MQ_SYNC_CI_SHIFT 0 +#define SSS_MBX_MQ_ASYNC_CI_SHIFT 8 + +#define SSS_MBX_MQ_SYNC_CI_MASK 0xFF +#define SSS_MBX_MQ_ASYNC_CI_MASK 0xFF + +#define SSS_GET_MBX_MQ_CI(val, field) \ + (((val) >> SSS_MBX_MQ_##field##_CI_SHIFT) & SSS_MBX_MQ_##field##_CI_MASK) +#define SSS_CLEAR_MBX_MQ_CI(val, field) \ + ((val) & (~(SSS_MBX_MQ_##field##_CI_MASK << SSS_MBX_MQ_##field##_CI_SHIFT))) + +/* Recv func mbx msg */ +struct sss_recv_mbx { + void *buf; + u16 buf_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_id; + enum sss_msg_ack_type ack_type; + void *resp_buf; +}; + +enum sss_mbx_cb_state { + SSS_VF_RECV_HANDLER_REG = 0, + SSS_VF_RECV_HANDLER_RUN, + SSS_PF_RECV_HANDLER_REG, + SSS_PF_RECV_HANDLER_RUN, + SSS_PPF_RECV_HANDLER_REG, + SSS_PPF_RECV_HANDLER_RUN, + SSS_PPF_TO_PF_RECV_HANDLER_REG, + SSS_PPF_TO_PF_RECV_HANDLER_RUN, +}; + +static inline int sss_check_mbx_param(struct sss_mbx *mbx, + void *buf_in, u16 in_size, u16 channel) +{ + if (!buf_in || in_size == 0) + return -EINVAL; + + if (in_size > SSS_MBX_DATA_SIZE) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx msg len %u exceed limit: [1, %u]\n", + in_size, SSS_MBX_DATA_SIZE); + return -EINVAL; + } + + if (channel >= SSS_CHANNEL_MAX) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + return 0; +} + +struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction); +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dest, enum sss_msg_direction_type direction_type, + enum sss_msg_ack_type type, struct sss_mbx_msg_info *msg_info); +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); +#define sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel) \ + sss_send_mbx_to_func_no_ack(hwdev, SSS_MGMT_SRC_ID, mod, cmd, \ + buf_in, in_size, channel) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c new file mode 100644 index 00000000000000..02ee99eba20a11 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include 
"sss_hwif_export.h" + +#define SSS_WAIT_CB_COMPLETE_MIN 900 +#define SSS_WAIT_CB_COMPLETE_MAX 1000 + +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->pf_mbx_cb[mod] = cb; + mbx->pf_mbx_data[mod] = pri_handle; + + set_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(sss_register_pf_mbx_handler); + +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->vf_mbx_cb[mod] = cb; + mbx->vf_mbx_data[mod] = pri_handle; + + set_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(sss_register_vf_mbx_handler); + +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + while (test_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->pf_mbx_cb[mod] = NULL; + mbx->pf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_pf_mbx_handler); + +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + while (test_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->vf_mbx_cb[mod] = NULL; + mbx->vf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_vf_mbx_handler); + +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (!SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE(dev)); + return -EINVAL; + } + + return sss_send_mbx_to_func(dev->mbx, mod, cmd, + sss_get_pf_id_of_vf(dev), buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_pf); + +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret = 0; + u16 dst_func_id; + + if (!hwdev) + return -EINVAL; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev)); + return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(dev->dev_hdl, "Invalid vf_id: %u\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id; + + return sss_send_mbx_to_func(dev->mbx, mod, cmd, + dst_func_id, buf_in, in_size, + buf_out, out_size, timeout, channel); +} 
+EXPORT_SYMBOL(sss_mbx_send_to_vf); + +static int sss_send_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct sss_mbx *func_to_func = hwdev->mbx; + int ret; + + ret = sss_check_mbx_param(func_to_func, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (mod == SSS_MOD_TYPE_COMM && cmd == SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP) + return 0; + + return sss_send_mbx_to_func(func_to_func, mod, cmd, SSS_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, channel); +} + +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_sync_mbx_send_msg); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c new file mode 100644 index 00000000000000..e43a7967074d06 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_csr.h" +#include "sss_common.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_mbx_init.h" + +#define SSS_MBX_WB_STATUS_SIZE 16UL + +#define SSS_MBX_DMA_MSG_QUEUE_DEPTH 32 + +#define SSS_MBX_WQ_NAME "sss_mbx" + +#define SSS_MBX_AREA(hwif) \ + ((hwif)->cfg_reg_base + SSS_HW_CSR_MBX_DATA_OFF) + +#define SSS_GET_MBX_BODY(header) ((u8 *)(header) + SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_LAST_SEG_MAX_SIZE \ + (SSS_MBX_BUF_SIZE_MAX - SSS_MAX_SEG_ID * SSS_MBX_SEG_SIZE) + +#define SSS_MSG_PROCESS_CNT_MAX 10 + +#define SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id) \ + ((src_func_id) < SSS_MAX_PF_NUM(hwdev)) + +#define SSS_MBX_MSG_NO_DATA_SIZE 1 + +#define SSS_MBX_PF_SEND_ERR 0x1 + +#define SSS_MAX_SEG_ID 42 + +struct sss_mbx_work { + struct work_struct work; + struct sss_mbx *mbx; + struct sss_recv_mbx *recv_mbx; + struct sss_msg_buffer *msg_buffer; +}; + +static int sss_alloc_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + u32 size; + + size = mq->depth * SSS_MBX_BUF_SIZE_MAX; + mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Fail to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * SSS_MBX_BUF_SIZE_MAX, + mq->dma_buff_vaddr, mq->dma_buff_paddr); + mq->dma_buff_vaddr = NULL; + mq->dma_buff_paddr = 0; +} + +static int sss_mbx_alloc_mq_dma_addr(struct sss_mbx *mbx) +{ + int ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + if (ret != 0) + return ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->async_msg_queue); + if (ret != 0) { + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + return ret; + } + + return 0; +} + +static void 
sss_mbx_free_mq_dma_addr(struct sss_mbx *mbx) +{ + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->async_msg_queue); +} + +static int sss_mbx_alloc_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + send_mbx->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + &send_mbx->wb_paddr, GFP_KERNEL); + if (!send_mbx->wb_vaddr) + return -ENOMEM; + + send_mbx->wb_state = send_mbx->wb_vaddr; + + return 0; +} + +static void sss_mbx_free_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + dma_free_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + send_mbx->wb_vaddr, send_mbx->wb_paddr); + + send_mbx->wb_vaddr = NULL; +} + +static int sss_alloc_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + msg_buffer->resp_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->resp_msg.msg) + return -ENOMEM; + + msg_buffer->recv_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->recv_msg.msg) { + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; + return -ENOMEM; + } + + atomic_set(&msg_buffer->recv_msg_cnt, 0); + msg_buffer->recv_msg.seq_id = SSS_MAX_SEG_ID; + msg_buffer->resp_msg.seq_id = SSS_MAX_SEG_ID; + + return 0; +} + +static void sss_free_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + kfree(msg_buffer->recv_msg.msg); + msg_buffer->recv_msg.msg = NULL; + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; +} + +static int sss_mbx_alloc_dma_addr(struct sss_mbx *sss_mbx) +{ + int ret; + + ret = sss_mbx_alloc_mq_dma_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to alloc mbx dma queue\n"); + return -ENOMEM; + } + + ret = sss_mbx_alloc_mq_wb_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to init mbx dma wb addr\n"); + goto alloc_dma_wb_addr_err; + } + + return 0; + +alloc_dma_wb_addr_err: + sss_mbx_free_mq_dma_addr(sss_mbx); + + return -ENOMEM; +} + +static void sss_mbx_free_dma_addr(struct sss_mbx *mbx) +{ + sss_mbx_free_mq_wb_addr(mbx); + sss_mbx_free_mq_dma_addr(mbx); +} + +static int sss_init_mbx_info(struct sss_mbx *mbx) +{ + int ret; + + mutex_init(&mbx->mbx_send_lock); + mutex_init(&mbx->msg_send_lock); + spin_lock_init(&mbx->mbx_lock); + mbx->sync_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + mbx->async_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + + mbx->workq = create_singlethread_workqueue(SSS_MBX_WQ_NAME); + if (!mbx->workq) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to create mbx workq\n"); + return -ENOMEM; + } + + ret = sss_alloc_mbx_msg_buffer(&mbx->mgmt_msg); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc mgmt message buffer\n"); + goto alloc_mbx_msg_buffer_err; + } + + ret = sss_mbx_alloc_dma_addr(mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc dma addr\n"); + goto mbx_alloc_dma_addr_err; + } + + return 0; + +mbx_alloc_dma_addr_err: + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +alloc_mbx_msg_buffer_err: + destroy_workqueue(mbx->workq); + + return -ENOMEM; +} + +static void sss_deinit_mbx_info(struct sss_mbx *mbx) +{ + if (mbx->workq) { + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + } + + sss_mbx_free_dma_addr(mbx); + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +} + +static int sss_alloc_func_mbx_msg(struct sss_mbx *mbx, u16 
func_num) +{ + if (mbx->func_msg) + return (mbx->num_func_msg == func_num) ? 0 : -EFAULT; + + mbx->func_msg = kcalloc(func_num, sizeof(*mbx->func_msg), GFP_KERNEL); + if (!mbx->func_msg) + return -ENOMEM; + + return 0; +} + +static void sss_free_func_mbx_msg(struct sss_mbx *mbx) +{ + kfree(mbx->func_msg); + mbx->func_msg = NULL; +} + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num) +{ + u16 i; + u16 cnt; + int ret; + struct sss_hwdev *dev = hwdev; + struct sss_mbx *mbx = dev->mbx; + + if (!hwdev || func_num == 0 || func_num > SSS_MAX_FUNC) + return -EINVAL; + + ret = sss_alloc_func_mbx_msg(mbx, func_num); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func msg\n"); + return ret; + } + + for (cnt = 0; cnt < func_num; cnt++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->func_msg[cnt]); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func %u msg buf\n", cnt); + goto alloc_mbx_msg_buf_err; + } + } + + mbx->num_func_msg = func_num; + + return 0; + +alloc_mbx_msg_buf_err: + for (i = 0; i < cnt; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); + + return -ENOMEM; +} + +static void sss_deinit_func_mbx_msg(struct sss_mbx *mbx) +{ + u16 i; + + if (!mbx->func_msg) + return; + + for (i = 0; i < mbx->num_func_msg; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); +} + +static void sss_chip_reset_mbx_ci(struct sss_mbx *mbx) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + val = SSS_CLEAR_MBX_MQ_CI(val, SYNC); + val = SSS_CLEAR_MBX_MQ_CI(val, ASYNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF, val); +} + +static void sss_chip_set_mbx_wb_attr(struct sss_mbx *mbx) +{ + u32 addr_h; + u32 addr_l; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + addr_h = upper_32_bits(send_mbx->wb_paddr); + addr_l = lower_32_bits(send_mbx->wb_paddr); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, addr_h); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, addr_l); +} + +static void sss_chip_set_mbx_attr(struct sss_mbx *mbx) +{ + sss_chip_reset_mbx_ci(mbx); + sss_chip_set_mbx_wb_attr(mbx); +} + +static void sss_chip_reset_mbx_attr(struct sss_mbx *sss_mbx) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(sss_mbx); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, 0); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, 0); +} + +static void sss_prepare_send_mbx(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + + send_mbx->data = SSS_MBX_AREA(SSS_TO_HWDEV(mbx)->hwif); +} + +static int sss_alloc_host_msg(struct sss_hwdev *hwdev) +{ + int i; + int ret; + int host_id; + u8 max_host = SSS_MAX_HOST_NUM(hwdev); + struct sss_mbx *mbx = hwdev->mbx; + + if (max_host == 0) + return 0; + + mbx->host_msg = kcalloc(max_host, sizeof(*mbx->host_msg), GFP_KERNEL); + if (!mbx->host_msg) + return -ENOMEM; + + for (host_id = 0; host_id < max_host; host_id++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->host_msg[host_id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to alloc host %d msg channel\n", host_id); + goto out; + } + } + + mbx->support_h2h_msg = true; + + return 0; + +out: + for (i = 0; i < host_id; i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; + + return -ENOMEM; +} + +static void sss_free_host_msg(struct sss_mbx *mbx) +{ + int i; + + if (!mbx->host_msg) + return; + + for (i = 0; i < 
SSS_MAX_HOST_NUM(SSS_TO_HWDEV(mbx)); i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; +} + +int sss_hwif_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mbx *mbx; + + mbx = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!mbx) + return -ENOMEM; + + hwdev->mbx = mbx; + mbx->hwdev = hwdev; + + ret = sss_init_mbx_info(mbx); + if (ret != 0) + goto init_mbx_info_err; + + if (SSS_IS_VF(hwdev)) { + ret = sss_init_func_mbx_msg(hwdev, 1); + if (ret != 0) + goto init_func_mbx_msg_err; + } + + sss_chip_set_mbx_attr(mbx); + + sss_prepare_send_mbx(mbx); + + ret = sss_alloc_host_msg(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc host msg\n"); + goto alloc_host_msg_err; + } + + return 0; + +alloc_host_msg_err: + sss_chip_reset_mbx_attr(mbx); + sss_deinit_func_mbx_msg(mbx); + +init_func_mbx_msg_err: + sss_deinit_mbx_info(mbx); + +init_mbx_info_err: + kfree(mbx); + hwdev->mbx = NULL; + + return ret; +} + +void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + + sss_chip_reset_mbx_attr(mbx); + + sss_free_host_msg(mbx); + + sss_deinit_func_mbx_msg(mbx); + + sss_deinit_mbx_info(mbx); + + kfree(mbx); + hwdev->mbx = NULL; +} + +static bool sss_check_mbx_msg_header(void *dev_hdl, + struct sss_msg_desc *msg_desc, u64 mbx_header) +{ + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + + if (seq_id > SSS_MAX_SEG_ID) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x\n", seq_id); + return false; + } + + if (seg_len > SSS_MBX_SEG_SIZE) { + sdk_err(dev_hdl, "Current seg info: seg_len = 0x%x\n", seg_len); + return false; + } + + if (seq_id == SSS_MAX_SEG_ID && seg_len > SSS_MBX_LAST_SEG_MAX_SIZE) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, seg_len = 0x%x\n", + seq_id, seg_len); + return false; + } + + if (seq_id == 0) + return true; + + if (seq_id != msg_desc->seq_id + 1) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, 0x%x\n", + seq_id, msg_desc->seq_id); + return false; + } + + if (msg_id != msg_desc->msg_info.msg_id) { + sdk_err(dev_hdl, "Current seg info: msg_id = 0x%x, 0x%x\n", + msg_id, msg_desc->msg_info.msg_id); + return false; + } + + if (mod != msg_desc->mod) { + sdk_err(dev_hdl, "Current seg info: mod = 0x%x, 0x%x\n", + mod, msg_desc->mod); + return false; + } + + if (cmd != msg_desc->cmd) { + sdk_err(dev_hdl, "Current seg info: cmd = 0x%x, 0x%x\n", + cmd, msg_desc->cmd); + return false; + } + + return true; +} + +static void sss_fill_msg_desc(struct sss_msg_desc *msg_desc, u64 *msg_header) +{ + u64 mbx_header = *msg_header; + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + u32 offset = seq_id * SSS_MBX_SEG_SIZE; + void *msg_body = SSS_GET_MBX_BODY(((void *)msg_header)); + + msg_desc->seq_id = seq_id; + if (seq_id == 0) { + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } + msg_desc->msg_len = SSS_GET_MSG_HEADER(mbx_header, MSG_LEN); + msg_desc->msg_info.state = SSS_GET_MSG_HEADER(mbx_header, STATUS); + memcpy((u8 *)msg_desc->msg + offset, msg_body, seg_len); +} + 
+static struct sss_recv_mbx *sss_alloc_recv_mbx(void) +{ + struct sss_recv_mbx *recv_mbx = NULL; + + recv_mbx = kzalloc(sizeof(*recv_mbx), GFP_KERNEL); + if (!recv_mbx) + return NULL; + + recv_mbx->buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->buf) + goto alloc_recv_mbx_buf_err; + + recv_mbx->resp_buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->resp_buf) + goto alloc_recv_mbx_resp_buf_err; + + return recv_mbx; + +alloc_recv_mbx_resp_buf_err: + kfree(recv_mbx->buf); + +alloc_recv_mbx_buf_err: + kfree(recv_mbx); + + return NULL; +} + +static void sss_free_recv_mbx(struct sss_recv_mbx *recv_mbx) +{ + kfree(recv_mbx->resp_buf); + kfree(recv_mbx->buf); + kfree(recv_mbx); +} + +static int sss_recv_vf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_vf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->vf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->vf_mbx_data[recv_mbx->mod], recv_mbx->cmd, recv_mbx->buf, + recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "VF mbx cb is unregistered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_ppf_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_pf_from_ppf_mbx_handler_t callback; + enum sss_mod_type mod = recv_mbx->mod; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %d\n", mod); + return -EINVAL; + } + + set_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + callback = mbx->pf_recv_ppf_mbx_cb[mod]; + if (callback && + test_bit(SSS_PPF_TO_PF_RECV_HANDLER_REG, &mbx->ppf_to_pf_mbx_cb_state[mod]) != 0) { + ret = callback(mbx->pf_recv_ppf_mbx_data[mod], recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF recv ppf mbx cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + return ret; +} + +static int sss_recv_ppf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, u8 pf_id, + void *resp_buf, u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_ppf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->ppf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PPF_RECV_HANDLER_REG, &mbx->ppf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->ppf_mbx_data[recv_mbx->mod], pf_id, vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PPF mbx cb is unregistered, mod = %u\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_vf_mbx_handler(struct sss_mbx 
*mbx, + struct sss_recv_mbx *recv_mbx, + u16 src_func_id, void *resp_buf, + u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_pf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->pf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[recv_mbx->mod]) != 0) { + vf_id = src_func_id - sss_get_glb_pf_vf_offset(SSS_TO_HWDEV(mbx)); + ret = callback(mbx->pf_mbx_data[recv_mbx->mod], vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF mbx mod(0x%x) cb is unregistered\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static void sss_send_mbx_response(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, int ret, u16 size, u16 src_func_id) +{ + u16 data_size; + struct sss_mbx_msg_info msg_info = {0}; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + msg_info.msg_id = recv_mbx->msg_id; + if (ret != 0) + msg_info.state = SSS_MBX_PF_SEND_ERR; + + data_size = (size == 0 || ret != 0) ? SSS_MBX_MSG_NO_DATA_SIZE : size; + if (data_size > SSS_MBX_DATA_SIZE) { + sdk_err(hwdev->dev_hdl, "Resp msg len(%d), out of range: %d\n", + data_size, SSS_MBX_DATA_SIZE); + data_size = SSS_MBX_DATA_SIZE; + } + + sss_send_mbx_msg(mbx, recv_mbx->mod, recv_mbx->cmd, recv_mbx->resp_buf, data_size, + src_func_id, SSS_RESP_MSG, SSS_MSG_NO_ACK, &msg_info); +} + +static void sss_recv_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx) +{ + int ret = 0; + void *resp_buf = recv_mbx->resp_buf; + u16 size = SSS_MBX_DATA_SIZE; + u16 src_func_id = recv_mbx->src_func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (SSS_IS_VF(hwdev)) { + ret = sss_recv_vf_mbx_handler(mbx, recv_mbx, resp_buf, &size); + goto out; + } + + if (SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id)) { + if (SSS_IS_PPF(hwdev)) + ret = sss_recv_ppf_mbx_handler(mbx, recv_mbx, + (u8)src_func_id, + resp_buf, &size); + else + ret = sss_recv_pf_from_ppf_handler(mbx, recv_mbx, resp_buf, &size); + } else { + ret = sss_recv_pf_from_vf_mbx_handler(mbx, + recv_mbx, src_func_id, + resp_buf, &size); + } + +out: + if (recv_mbx->ack_type == SSS_MSG_ACK) + sss_send_mbx_response(mbx, recv_mbx, ret, size, src_func_id); +} + +static void sss_recv_mbx_work_handler(struct work_struct *work) +{ + struct sss_mbx_work *mbx_work = container_of(work, struct sss_mbx_work, work); + + sss_recv_mbx_handler(mbx_work->mbx, mbx_work->recv_mbx); + + atomic_dec(&mbx_work->msg_buffer->recv_msg_cnt); + + destroy_work(&mbx_work->work); + + sss_free_recv_mbx(mbx_work->recv_mbx); + + kfree(mbx_work); +} + +static void sss_init_recv_mbx_param(struct sss_recv_mbx *recv_mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + recv_mbx->msg_id = msg_desc->msg_info.msg_id; + recv_mbx->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + recv_mbx->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + recv_mbx->ack_type = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + recv_mbx->src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + recv_mbx->buf_len = msg_desc->msg_len; + memcpy(recv_mbx->buf, msg_desc->msg, msg_desc->msg_len); +} + +static int sss_init_mbx_work(struct sss_mbx *mbx, struct sss_recv_mbx *recv_mbx, + struct sss_msg_buffer *msg_buffer) +{ + 
struct sss_mbx_work *mbx_work = NULL; + + mbx_work = kzalloc(sizeof(*mbx_work), GFP_KERNEL); + if (!mbx_work) + return -ENOMEM; + + atomic_inc(&msg_buffer->recv_msg_cnt); + + mbx_work->msg_buffer = msg_buffer; + mbx_work->recv_mbx = recv_mbx; + mbx_work->mbx = mbx; + + INIT_WORK(&mbx_work->work, sss_recv_mbx_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mbx->workq, &mbx_work->work); + + return 0; +} + +static void sss_recv_mbx_msg_handler(struct sss_mbx *mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + u32 msg_cnt; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_recv_mbx *recv_mbx = NULL; + struct sss_msg_buffer *msg_buffer = container_of(msg_desc, struct sss_msg_buffer, recv_msg); + + msg_cnt = atomic_read(&msg_buffer->recv_msg_cnt); + if (msg_cnt > SSS_MSG_PROCESS_CNT_MAX) { + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + + sdk_warn(hwdev->dev_hdl, "This func(%llu) have %u msg wait to process\n", + src_func_id, msg_cnt); + return; + } + + recv_mbx = sss_alloc_recv_mbx(); + if (!recv_mbx) { + sdk_err(hwdev->dev_hdl, "Fail to alloc receive recv_mbx message buffer\n"); + return; + } + + sss_init_recv_mbx_param(recv_mbx, msg_desc, msg_header); + + ret = sss_init_mbx_work(mbx, recv_mbx, msg_buffer); + if (ret != 0) + sss_free_recv_mbx(recv_mbx); +} + +static void sss_resp_mbx_handler(struct sss_mbx *mbx, + const struct sss_msg_desc *msg_desc) +{ + spin_lock(&mbx->mbx_lock); + if (msg_desc->msg_info.msg_id == mbx->send_msg_id && + mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_SUCCESS; + else + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx resp timeout, current send msg_id(0x%x), recv msg_id(0x%x), status(0x%x)\n", + mbx->send_msg_id, msg_desc->msg_info.msg_id, msg_desc->msg_info.state); + spin_unlock(&mbx->mbx_lock); +} + +static void sss_recv_mbx_aeq(struct sss_mbx *mbx, u64 *msg_header, + struct sss_msg_desc *msg_desc) +{ + u64 header = *msg_header; + + if (!sss_check_mbx_msg_header(SSS_TO_HWDEV(mbx)->dev_hdl, msg_desc, header)) { + msg_desc->seq_id = SSS_MAX_SEG_ID; + return; + } + + sss_fill_msg_desc(msg_desc, msg_header); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_DIRECT_SEND_MSG) { + sss_recv_mbx_msg_handler(mbx, msg_desc, header); + return; + } + + sss_resp_mbx_handler(mbx, msg_desc); +} + +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size) +{ + u64 msg_header = *((u64 *)header); + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + u64 direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + struct sss_msg_desc *msg_desc = NULL; + struct sss_hwdev *hwdev = (struct sss_hwdev *)handle; + struct sss_mbx *mbx = hwdev->mbx; + + msg_desc = sss_get_mbx_msg_desc(mbx, src_func_id, direction); + if (!msg_desc) { + sdk_err(hwdev->dev_hdl, "Invalid mbx src_func_id: %u\n", (u32)src_func_id); + return; + } + + sss_recv_mbx_aeq(mbx, (u64 *)header, msg_desc); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h new file mode 100644 index 00000000000000..ab440fea3e0a5c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_INIT_H +#define SSS_HWIF_MBX_INIT_H + +#include "sss_hwdev.h" + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num); +int sss_hwif_init_mbx(struct sss_hwdev *hwdev); +void 
sss_hwif_deinit_mbx(struct sss_hwdev *hwdev); +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h new file mode 100644 index 00000000000000..c6a085e5444ab1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_COMMON_H +#define SSS_HWIF_MGMT_COMMON_H + +#define SSS_ASYNC_MSG_FLAG 0x8 + +#define SSS_PF_MGMT_BUF_LEN_MAX 2048UL + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SEG_LEN 48 + +#define SSS_MGMT_SEQ_ID_MAX \ + (ALIGN(SSS_MSG_TO_MGMT_LEN_MAX, SSS_SEG_LEN) / SSS_SEG_LEN) + +#define SSS_MGMT_LAST_SEG_LEN_MAX \ + (SSS_PF_MGMT_BUF_LEN_MAX - SSS_SEG_LEN * SSS_MGMT_SEQ_ID_MAX) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c new file mode 100644 index 00000000000000..2add4b1af94410 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_hwif_adm_init.h" +#include "sss_hwif_mgmt_init.h" + +#define SSS_DEF_OUT_SIZE 1 + +struct sss_mgmt_msg_handle_work { + struct work_struct work; + struct sss_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + u8 no_ack; + u8 resvd; + + enum sss_mod_type mod; + u16 cmd; + u16 msg_id; +}; + +static void sss_send_response_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct sss_mbx_msg_info info; + + info.msg_id = (u8)msg_id; + info.state = 0; + + sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + SSS_MGMT_SRC_ID, SSS_RESP_MSG, SSS_MSG_NO_ACK, &info); +} + +static void sss_mgmt_recv_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 mod, u16 cmd, void *in_buf, + u16 in_size, u16 msg_id, int resp_need) +{ + u16 size; + u16 out_size = 0; + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + void *out_buf = mgmt_msg->ack_buf; + + memset(out_buf, 0, SSS_PF_MGMT_BUF_LEN_MAX); + + if (mod >= SSS_MOD_TYPE_HW_MAX) { + sdk_warn(dev_hdl, "Recv illegal msg from mgmt cpu, mod = %d\n", mod); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + set_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + + if (!mgmt_msg->recv_handler[mod] || + !test_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod])) { + sdk_warn(dev_hdl, "Recv mgmt cb is null, mod = %d\n", mod); + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + mgmt_msg->recv_handler[mod](mgmt_msg->recv_data[mod], + cmd, in_buf, in_size, out_buf, &out_size); + + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + +out: + if (resp_need != 0) { + size = (out_size == 0) ? 
SSS_DEF_OUT_SIZE : out_size; + sss_send_response_mbx_to_mgmt(SSS_TO_HWDEV(mgmt_msg), mod, cmd, + out_buf, size, msg_id); + } +} + +static void sss_recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct sss_mgmt_msg_handle_work *msg_work = + container_of(work, struct sss_mgmt_msg_handle_work, work); + + sss_mgmt_recv_msg_handler(msg_work->pf_to_mgmt, msg_work->mod, + msg_work->cmd, msg_work->msg, msg_work->msg_len, msg_work->msg_id, + !msg_work->no_ack); + + destroy_work(&msg_work->work); + + kfree(msg_work->msg); + kfree(msg_work); +} + +static void sss_init_mgmt_recv_msg(struct sss_recv_msg *msg_recv, u64 msg_header) +{ + msg_recv->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + msg_recv->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + msg_recv->no_ack = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + msg_recv->buf_len = SSS_GET_MSG_HEADER(msg_header, MSG_LEN); + msg_recv->msg_id = SSS_GET_MSG_HEADER(msg_header, MSG_ID); + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; +} + +static bool sss_check_mgmt_head_info(struct sss_recv_msg *msg_recv, u64 header) +{ + u8 seg_len = SSS_GET_MSG_HEADER(header, SEG_LEN); + u8 seg_id = SSS_GET_MSG_HEADER(header, SEQID); + u16 msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + + if (seg_id > SSS_MGMT_SEQ_ID_MAX || seg_len > SSS_SEG_LEN || + (seg_id == SSS_MGMT_SEQ_ID_MAX && seg_len > SSS_MGMT_LAST_SEG_LEN_MAX)) + return false; + + if (seg_id == 0) { + msg_recv->msg_id = msg_id; + msg_recv->seq_id = seg_id; + + return true; + } + + if (seg_id != (msg_recv->seq_id + 1) || msg_id != msg_recv->msg_id) + return false; + + msg_recv->seq_id = seg_id; + + return true; +} + +static void sss_mgmt_resp_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + + if ((msg_recv->msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + spin_lock(&mgmt_msg->sync_event_lock); + if (msg_recv->msg_id == mgmt_msg->sync_msg_id && + mgmt_msg->event_state == SSS_ADM_EVENT_START) { + mgmt_msg->event_state = SSS_ADM_EVENT_SUCCESS; + complete(&msg_recv->done); + spin_unlock(&mgmt_msg->sync_event_lock); + return; + } + + sdk_err(dev_hdl, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + sdk_err(dev_hdl, "Wait timeout, send and recv msg id(0x%x)(0x%x), event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + spin_unlock(&mgmt_msg->sync_event_lock); +} + +static void sss_init_mgmt_msg_work(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + struct sss_mgmt_msg_handle_work *msg_work = NULL; + + msg_work = kzalloc(sizeof(*msg_work), GFP_KERNEL); + if (!msg_work) + return; + + if (msg_recv->buf_len != 0) { + msg_work->msg = kzalloc(msg_recv->buf_len, GFP_KERNEL); + if (!msg_work->msg) { + kfree(msg_work); + return; + } + } + + msg_work->pf_to_mgmt = mgmt_msg; + msg_work->msg_len = msg_recv->buf_len; + memcpy(msg_work->msg, msg_recv->buf, msg_recv->buf_len); + msg_work->msg_id = msg_recv->msg_id; + msg_work->mod = msg_recv->mod; + msg_work->cmd = msg_recv->cmd; + msg_work->no_ack = msg_recv->no_ack; + + INIT_WORK(&msg_work->work, sss_recv_mgmt_msg_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mgmt_msg->workq, &msg_work->work); +} + +static void sss_recv_mgmt_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 *msg_header, struct sss_recv_msg *msg_recv) +{ + u8 seq_id; + u8 seq_len; + u16 msg_id; + u32 msg_offset; + u64 dir; + u64 header = *((u64 *)msg_header); + void *msg_body; + 
struct sss_hwdev *hwdev = SSS_TO_HWDEV(mgmt_msg); + + dir = SSS_GET_MSG_HEADER(header, DIRECTION); + msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + if (dir == SSS_RESP_MSG && (msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + if (!sss_check_mgmt_head_info(msg_recv, header)) { + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; + sdk_err(hwdev->dev_hdl, "Fail to check Mgmt msg seq id and seq len\n"); + return; + } + + seq_len = SSS_GET_MSG_HEADER(header, SEG_LEN); + seq_id = SSS_GET_MSG_HEADER(header, SEQID); + msg_offset = seq_id * SSS_SEG_LEN; + msg_body = msg_header + sizeof(header); + memcpy((u8 *)msg_recv->buf + msg_offset, msg_body, seq_len); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + sss_init_mgmt_recv_msg(msg_recv, header); + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_RESP_MSG) { + sss_mgmt_resp_msg_handler(mgmt_msg, msg_recv); + return; + } + + sss_init_mgmt_msg_work(mgmt_msg, msg_recv); +} + +static void sss_set_mbx_event_timeout(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + spin_lock(&mbx->mbx_lock); + if (mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_TIMEOUT; + spin_unlock(&mbx->mbx_lock); +} + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *msg_header, u8 size) +{ + bool msg_dir; + struct sss_recv_msg *msg = NULL; + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (SSS_GET_MSG_HEADER(*(u64 *)msg_header, SOURCE) == SSS_MSG_SRC_MBX) { + sss_recv_mbx_aeq_handler(hwdev, msg_header, size); + return; + } + + mgmt_msg = dev->pf_to_mgmt; + if (!mgmt_msg) + return; + + msg_dir = SSS_GET_MSG_HEADER(*(u64 *)msg_header, DIRECTION) == SSS_DIRECT_SEND_MSG; + + msg = msg_dir ? &mgmt_msg->recv_msg : &mgmt_msg->recv_resp_msg; + + sss_recv_mgmt_msg_handler(mgmt_msg, msg_header, msg); +} + +void sss_force_complete_all(void *dev) +{ + struct sss_hwdev *hwdev = dev; + + spin_lock_bh(&hwdev->channel_lock); + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF && + test_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state)) + sss_complete_adm_event(hwdev); + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) + sss_set_mbx_event_timeout(hwdev); + + if (test_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state)) + sss_ctrlq_flush_sync_cmd(hwdev); + + spin_unlock_bh(&hwdev->channel_lock); +} + +void sss_flush_mgmt_workq(void *hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + flush_workqueue(dev->aeq_info->workq); + + if (sss_get_func_type(dev) != SSS_FUNC_TYPE_VF) + flush_workqueue(dev->pf_to_mgmt->workq); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h new file mode 100644 index 00000000000000..19196c2b6f9bcb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_INIT_H +#define SSS_HWIF_MGMT_INIT_H + +#include "sss_hwdev.h" + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size); +void sss_force_complete_all(void *dev); +void sss_flush_mgmt_workq(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c new file mode 100644 index 00000000000000..c50ae2daa06a3d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 3snic Technologies Co., Ltd */ + +#include +#include +#include 
"sss_kernel.h" +#include "sss_pci.h" + +#ifdef USE_OLD_PCI_FUNCTION +#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) + +MODULE_IMPORT_NS(CXL); + +int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + int rc; + + if (!pcie_aer_is_native(dev)) + return -EIO; + + rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); +} + +int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + int rc; + + if (!pcie_aer_is_native(dev)) + return -EIO; + + rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); +} + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h new file mode 100644 index 00000000000000..bebb1755350133 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_H +#define SSS_PCI_H + +#ifdef USE_OLD_PCI_FUNCTION +#include + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &(pdev)->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_zalloc(pool, flags, handle) \ + dma_pool_zalloc(pool, flags, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} + +int pci_disable_pcie_error_reporting(struct pci_dev *dev); +int pci_enable_pcie_error_reporting(struct pci_dev *dev); +#endif + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c new file mode 100644 index 00000000000000..528d58a5445df4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev.h" +#include "sss_pci_error.h" + +static void sss_record_pcie_error(void *dev) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + atomic_inc(&hwdev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_err(&pdev->dev, "Pci error, state: 0x%08x\n", state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (adapter) + sss_record_pcie_error(adapter->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h new file mode 100644 index 00000000000000..26e65d77b98e56 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ERROR_H +#define SSS_PCI_ERROR_H + +#include + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c new file mode 100644 index 00000000000000..3d77fb77d4aa1d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_global.h" + +static bool attach_uld = true; +module_param(attach_uld, bool, 0444); +MODULE_PARM_DESC(attach_uld, "enable attach upper driver - default is true"); + +static struct sss_uld_info g_uld_info[SSS_SERVICE_TYPE_MAX]; + +static const char *g_uld_name[SSS_SERVICE_TYPE_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", + "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom" +}; + +/* lock for attach/detach all uld and register/ unregister uld */ +struct mutex g_uld_mutex; + +void sss_init_uld_lock(void) +{ + mutex_init(&g_uld_mutex); +} + +void sss_lock_uld(void) +{ + mutex_lock(&g_uld_mutex); +} + +void sss_unlock_uld(void) +{ + mutex_unlock(&g_uld_mutex); +} + +const char **sss_get_uld_names(void) +{ + return g_uld_name; +} + +struct sss_uld_info *sss_get_uld_info(void) +{ + return g_uld_info; +} + +bool sss_attach_is_enable(void) +{ + return attach_uld; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h new file mode 100644 index 00000000000000..c703eb3ab0d28f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_GLOBAL_H +#define SSS_PCI_GLOBAL_H + +#include + +#include "sss_hw_uld_driver.h" + +struct sss_uld_info *sss_get_uld_info(void); +bool sss_attach_is_enable(void); +const char **sss_get_uld_names(void); +void sss_init_uld_lock(void); +void sss_lock_uld(void); +void sss_unlock_uld(void); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h new file mode 100644 index 00000000000000..699748a46505e6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ID_TBL_H +#define SSS_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_SSSNIC 0x1F3F +#define SSS_DEV_ID_STANDARD 0x9020 +#define SSS_DEV_ID_SPN120 0x9021 +#define SSS_DEV_ID_VF 0x9001 +#define SSS_DEV_ID_VF_HV 0x9002 +#define SSS_DEV_ID_SPU 0xAC00 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c new file mode 100644 index 00000000000000..c953f54d574b72 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_pci_remove.h" +#include "sss_pci_global.h" +#include "sss_pci_probe.h" +#include "sss_tool.h" + +#define SSS_SYNC_YEAR_OFFSET 1900 +#define SSS_SYNC_MONTH_OFFSET 1 + +#define SSS_CHECK_EVENT_INFO(event) \ + ((event)->service == SSS_EVENT_SRV_COMM && \ + (event)->type == SSS_EVENT_FAULT) + +#define SSS_CHECK_FAULT_EVENT_INFO(hwdev, fault_event) \ + ((fault_event)->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && \ + (fault_event)->info.chip.func_id < sss_get_max_pf_num(hwdev)) + +#define SSS_GET_CFG_REG_BAR(pdev) (SSS_IS_VF_DEV(pdev) ? \ + SSS_VF_PCI_CFG_REG_BAR : SSS_PF_PCI_CFG_REG_BAR) + +static bool sss_get_vf_load_state(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (pci_is_root_bus(pdev->bus)) + return false; + + dev = pdev->is_virtfn ? pdev->physfn : pdev; + adapter = pci_get_drvdata(dev); + + if (!adapter) { + sdk_err(&pdev->dev, "Invalid adapter, is null.\n"); + return false; + } + + return true; +} + +static int sss_init_pci_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable pci device\n"); + goto enable_err; + } + + ret = pci_request_regions(pdev, SSS_DRV_NAME); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to request regions\n"); + goto regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit DMA mask\n"); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set DMA mask\n"); + goto dma_err; + } + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit coherent DMA mask\n"); + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set coherent DMA mask\n"); + goto dma_err; + } + } + + return 0; + +dma_err: + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + +regions_err: + pci_disable_device(pdev); + +enable_err: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state) +{ + mutex_lock(&adapter->uld_attach_mutex); + adapter->init_state = state; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static int sss_map_pci_bar(struct pci_dev *pdev, + struct sss_pci_adapter *adapter) +{ + adapter->db_base_paddr = pci_resource_start(pdev, SSS_PCI_DB_BAR); + adapter->db_dwqe_len = pci_resource_len(pdev, SSS_PCI_DB_BAR); + adapter->db_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_DB_BAR); + if (!adapter->db_reg_bar) { + sdk_err(&pdev->dev, "Fail to map db reg bar\n"); + return -ENOMEM; + } + + if (!SSS_IS_VF_DEV(pdev)) { + adapter->mgmt_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_MGMT_REG_BAR); + if (!adapter->mgmt_reg_bar) { + sdk_err(&pdev->dev, "Fail to map mgmt reg bar\n"); + goto mgmt_bar_err; + } + } + + adapter->intr_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_INTR_REG_BAR); + if (!adapter->intr_reg_bar) { + sdk_err(&pdev->dev, "Fail to map intr reg bar\n"); + goto intr_bar_err; + } + + adapter->cfg_reg_bar = pci_ioremap_bar(pdev, SSS_GET_CFG_REG_BAR(pdev)); + if (!adapter->cfg_reg_bar) { + 
sdk_err(&pdev->dev, "Fail to map config reg bar\n"); + goto cfg_bar_err; + } + + return 0; + +cfg_bar_err: + iounmap(adapter->intr_reg_bar); + +intr_bar_err: + if (!SSS_IS_VF_DEV(pdev)) + iounmap(adapter->mgmt_reg_bar); + +mgmt_bar_err: + iounmap(adapter->db_reg_bar); + + return -ENOMEM; +} + +static void sss_send_event_to_uld(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + enum sss_service_type type; + const char **uld_name = sss_get_uld_names(); + struct sss_uld_info *uld_info = sss_get_uld_info(); + + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (test_and_set_bit(type, &adapter->uld_run_state)) { + sdk_warn(&adapter->pcidev->dev, + "Fail to send event, svc: 0x%x, event type: 0x%x, uld_name: %s\n", + event_info->service, event_info->type, uld_name[type]); + continue; + } + + if (uld_info[type].event) + uld_info[type].event(&adapter->hal_dev, + adapter->uld_dev[type], event_info); + clear_bit(type, &adapter->uld_run_state); + } +} + +static void sss_send_event_to_dst(struct sss_pci_adapter *adapter, u16 func_id, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (sss_get_global_func_id(dest_adapter->hwdev) == func_id) { + sss_send_event_to_uld(dest_adapter, event_info); + break; + } + } + sss_put_chip_node(); +} + +static void sss_send_event_to_all_pf(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + sss_send_event_to_uld(dest_adapter, event_info); + } + sss_put_chip_node(); +} + +static void sss_process_event(void *data, struct sss_event_info *event_info) +{ + u16 id; + struct sss_pci_adapter *pci_adapter = data; + struct sss_fault_event *fault_event = (void *)event_info->event_data; + + if (SSS_CHECK_EVENT_INFO(event_info) && + SSS_CHECK_FAULT_EVENT_INFO(pci_adapter->hwdev, fault_event)) { + id = fault_event->info.chip.func_id; + return sss_send_event_to_dst(pci_adapter, id, event_info); + } + + if (event_info->type == SSS_EVENT_MGMT_WATCHDOG) + sss_send_event_to_all_pf(pci_adapter, event_info); + else + sss_send_event_to_uld(pci_adapter, event_info); +} + +static void sss_sync_time_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + u64 mstime; + struct timeval val = {0}; + struct rtc_time r_time = {0}; + + do_gettimeofday(&val); + + mstime = (u64)(val.tv_sec * MSEC_PER_SEC + val.tv_usec / USEC_PER_MSEC); + ret = sss_chip_sync_time(adapter->hwdev, mstime); + if (ret != 0) { + sdk_err(&adapter->pcidev->dev, "Fail to sync UTC time to fw, ret:%d.\n", ret); + } else { + rtc_time_to_tm((unsigned long)(val.tv_sec), &r_time); + sdk_info(&adapter->pcidev->dev, + "Success to sync UTC time to fw. 
UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + r_time.tm_year + SSS_SYNC_YEAR_OFFSET, + r_time.tm_mon + SSS_SYNC_MONTH_OFFSET, + r_time.tm_mday, r_time.tm_hour, r_time.tm_min, r_time.tm_sec); + } +} + +static int sss_attach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type, const struct sss_uld_info *uld_info) +{ + int ret = 0; + void *uld = NULL; + const char **name = sss_get_uld_names(); + struct pci_dev *pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + + if (adapter->uld_dev[type]) { + sdk_err(&pdev->dev, "Fail to attach pci dev, driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return 0; + } + + ret = uld_info->probe(&adapter->hal_dev, &uld, adapter->uld_dev_name[type]); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to probe for driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return ret; + } + + adapter->uld_dev[type] = uld; + set_bit(type, &adapter->uld_attach_state); + mutex_unlock(&adapter->uld_attach_mutex); + + sdk_info(&pdev->dev, "Success to attach %s driver\n", name[type]); + + return 0; +} + +static bool sss_get_vf_service_load(struct pci_dev *pdev, + enum sss_service_type service_type) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (!pdev) { + pr_err("Invalid pdev, is null.\n"); + return false; + } + + dev = (pdev->is_virtfn != 0) ? pdev->physfn : pdev; + + adapter = pci_get_drvdata(dev); + if (!adapter) { + sdk_err(&pdev->dev, "Invalid pci adapter, is null.\n"); + return false; + } + + return true; +} + +static void sss_attach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + enum sss_service_type type; + struct pci_dev *pdev = adapter->pcidev; + struct sss_uld_info *info = sss_get_uld_info(); + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (!info[type].probe) + continue; + if (pdev->is_virtfn && + !sss_get_vf_service_load(pdev, type)) { + sdk_info(&pdev->dev, + "VF dev disable service_type = %d load in host\n", type); + continue; + } + sss_attach_uld_driver(adapter, type, &info[type]); + } + sss_unlock_uld(); + sss_put_chip_node(); +} + +static int sss_attach_uld_dev(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + adapter->hal_dev.pdev = pdev; + adapter->hal_dev.hwdev = adapter->hwdev; + + if (!sss_attach_is_enable()) + return 0; + + sss_attach_all_uld_driver(adapter); + + return 0; +} + +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + const char **uld_name = sss_get_uld_names(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to register\n", type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid info of %s driver to register\n", uld_name[type]); + return -EINVAL; + } + + sss_hold_chip_node(); + sss_lock_uld(); + + if (info[type].probe) { + sss_unlock_uld(); + sss_put_chip_node(); + pr_err("Driver %s already register\n", uld_name[type]); + return -EINVAL; + } + + list = sss_get_chip_list(); + memcpy(&info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(card_node, list, node) { + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_attach_uld_driver(adapter, type, uld_info) != 0) { + sdk_err(&adapter->pcidev->dev, + "Fail to attach %s 
driver to pci dev\n", uld_name[type]); + continue; + } + } + } + + sss_unlock_uld(); + sss_put_chip_node(); + + pr_info("Success to register %s driver\n", uld_name[type]); + return 0; +} +EXPORT_SYMBOL(sss_register_uld); + +static int sss_notify_ok_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = sss_chip_set_pci_bdf_num(adapter->hwdev, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set BDF info to chip\n"); + return ret; + } + + return 0; +} + +static int sss_init_function(struct pci_dev *pdev, struct sss_pci_adapter *adapter) +{ + int ret; + + ret = sss_init_hwdev(adapter); + if (ret != 0) { + adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Fail to init hardware device\n"); + return -EFAULT; + } + + sss_register_dev_event(adapter->hwdev, adapter, sss_process_event); + + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + set_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state); + sss_sync_time_to_chip(adapter); + } + + sss_chip_node_lock(); + ret = sss_tool_init(adapter->hwdev, adapter->chip_node); + if (ret) { + sss_chip_node_unlock(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + goto nictool_init_err; + } + sss_chip_node_unlock(); + + sss_add_func_list(adapter); + + ret = sss_attach_uld_dev(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to attach uld dev\n"); + goto attach_uld_err; + } + + return 0; + +attach_uld_err: + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); +nictool_init_err: + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); + + return ret; +} + +static int sss_init_adapter(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (pdev->is_virtfn != 0 && (!sss_get_vf_load_state(pdev))) { + sdk_info(&pdev->dev, "Vf dev disable load in host\n"); + return 0; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_START); + + ret = sss_map_pci_bar(pdev, adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to map bar\n"); + goto map_bar_fail; + } + + /* if chip information of pcie function exist, add the function into chip */ + ret = sss_alloc_chip_node(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + ret = sss_init_function(pdev, adapter); + if (ret != 0) + goto func_init_err; + + ret = sss_notify_ok_to_chip(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to notify ok\n"); + goto notify_err; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_OK); + + return 0; + +notify_err: + sss_deinit_function(pdev); + +func_init_err: + sss_free_chip_node(adapter); + +alloc_chip_node_fail: + sss_unmap_pci_bar(adapter); + +map_bar_fail: + sdk_err(&pdev->dev, "Fail to init adapter\n"); + return ret; +} + +static void sss_init_adapter_param(struct sss_pci_adapter *adapter, + struct pci_dev *pdev) +{ + adapter->pcidev = pdev; + adapter->init_state = SSS_NO_PROBE; + spin_lock_init(&adapter->dettach_uld_lock); + mutex_init(&adapter->uld_attach_mutex); + pci_set_drvdata(pdev, adapter); +} + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct sss_pci_adapter *adapter = NULL; + + sdk_info(&pdev->dev, "Pci probe begin\n"); + + if (!pdev) + return -EINVAL; + + adapter = 
kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + ret = -ENOMEM; + goto init_pci_err; + } + + sss_init_adapter_param(adapter, pdev); + + ret = sss_init_pci_dev(pdev); + if (ret != 0) { + kfree(adapter); + sdk_err(&pdev->dev, "Fail to init pci device\n"); + goto init_pci_err; + } + + ret = sss_init_adapter(adapter); + if (ret != 0) + goto init_adapter_err; + + sdk_info(&pdev->dev, "Success to probe pci\n"); + return 0; + +init_adapter_err: + sss_deinit_pci_dev(pdev); + +init_pci_err: + sdk_err(&pdev->dev, "Fail to pci probe\n"); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h new file mode 100644 index 00000000000000..64cb4ab6a6e1ce --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_PROBE_H +#define SSS_PCI_PROBE_H + +#include + +#include "sss_adapter.h" + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c new file mode 100644 index 00000000000000..f7285c1372a32c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_mgmt_init.h" +#include "sss_pci_global.h" +#include "sss_tool.h" +#include "sss_pci_remove.h" + +#define SSS_WAIT_SRIOV_CFG_TIMEOUT 15000 +#define SSS_EVENT_PROCESS_TIMEOUT 10000 + +#define SSS_SRIOV_MIN_USLEEP 9900 +#define SSS_SRIOV_MAX_USLEEP 10000 + +#define SSS_EVENT_MIN_USLEEP 900 +#define SSS_EVENT_MAX_USLEEP 1000 + +static void sss_set_adapter_remove_state(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + if (adapter->init_state != SSS_PROBE_OK) { + sdk_warn(&pdev->dev, "Current function don not need remove\n"); + mutex_unlock(&adapter->uld_attach_mutex); + } + adapter->init_state = SSS_IN_REMOVE; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static void sss_wait_sriov_cfg_complete(struct sss_pci_adapter *adapter) +{ + unsigned long end_time; + struct sss_sriov_info *info = &adapter->sriov_info; + + clear_bit(SSS_SRIOV_PRESENT, &info->state); + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + + end_time = jiffies + msecs_to_jiffies(SSS_WAIT_SRIOV_CFG_TIMEOUT); + do { + if (!test_bit(SSS_SRIOV_ENABLE, &info->state) && + !test_bit(SSS_SRIOV_DISABLE, &info->state)) + return; + + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + } while (time_before(jiffies, end_time)); +} + +static bool sss_wait_uld_dev_timeout(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + unsigned long end_time; + + end_time = jiffies + msecs_to_jiffies(SSS_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + usleep_range(SSS_EVENT_MIN_USLEEP, SSS_EVENT_MAX_USLEEP); + } while (time_before(jiffies, 
end_time)); + + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + return true; +} + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + bool timeout; + struct sss_uld_info *info = sss_get_uld_info(); + const char **name = sss_get_uld_names(); + + mutex_lock(&adapter->uld_attach_mutex); + if (!adapter->uld_dev[type]) { + mutex_unlock(&adapter->uld_attach_mutex); + return; + } + + timeout = sss_wait_uld_dev_timeout(adapter, type); + + spin_lock_bh(&adapter->dettach_uld_lock); + clear_bit(type, &adapter->uld_attach_state); + spin_unlock_bh(&adapter->dettach_uld_lock); + + info[type].remove(&adapter->hal_dev, adapter->uld_dev[type]); + adapter->uld_dev[type] = NULL; + + if (!timeout) + clear_bit(type, &adapter->uld_run_state); + + sdk_info(&adapter->pcidev->dev, + "Success to detach %s driver from pci device\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); +} + +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + struct sss_uld_info *info = sss_get_uld_info(); + enum sss_service_type type; + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_MAX - 1; type > SSS_SERVICE_TYPE_NIC; type--) { + if (info[type].probe) + sss_detach_uld_driver(adapter, type); + } + + if (info[SSS_SERVICE_TYPE_NIC].probe) + sss_detach_uld_driver(adapter, SSS_SERVICE_TYPE_NIC); + sss_unlock_uld(); + sss_put_chip_node(); +} + +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter) +{ + sss_detach_all_uld_driver(adapter); +} + +void sss_unregister_uld(enum sss_service_type type) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *card_list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to unregister\n", type); + return; + } + + sss_hold_chip_node(); + sss_lock_uld(); + card_list = sss_get_chip_list(); + list_for_each_entry(card_node, card_list, node) { + /* detach vf first */ + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PPF) + sss_detach_uld_driver(adapter, type); + } + + memset(&info[type], 0, sizeof(*info)); + sss_unlock_uld(); + sss_put_chip_node(); +} +EXPORT_SYMBOL(sss_unregister_uld); + +void sss_deinit_function(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sss_chip_disable_mgmt_channel(adapter->hwdev); + + sss_flush_mgmt_workq(adapter->hwdev); + + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); + + sss_dettach_uld_dev(adapter); + + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); +} + +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter) +{ + iounmap(adapter->cfg_reg_bar); + iounmap(adapter->intr_reg_bar); + + if (!SSS_IS_VF_DEV(adapter->pcidev)) + iounmap(adapter->mgmt_reg_bar); + + iounmap(adapter->db_reg_bar); +} + +int sss_deinit_adapter(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + sss_set_adapter_remove_state(adapter); + + sss_hwdev_detach(adapter->hwdev); + 
+ if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_wait_sriov_cfg_complete(adapter); + sss_pci_disable_sriov(adapter); + } + + sss_deinit_function(pdev); + + sss_free_chip_node(adapter); + + sss_unmap_pci_bar(adapter); + + sss_set_adapter_probe_state(adapter, SSS_NO_PROBE); + + sdk_info(&pdev->dev, "Pcie device removed function\n"); + + return 0; +} + +void sss_deinit_pci_dev(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(adapter); +} + +void sss_pci_remove(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return; + + sdk_info(&pdev->dev, "Begin pcie device remove\n"); + + sss_deinit_adapter(adapter); + + sss_deinit_pci_dev(pdev); + + sdk_info(&pdev->dev, "Success to remove pcie device\n"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h new file mode 100644 index 00000000000000..ddd760ee53dff4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_REMOVE_H +#define SSS_PCI_REMOVE_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_adapter.h" + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, enum sss_service_type type); +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter); +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter); +void sss_deinit_function(struct pci_dev *pdev); +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter); +int sss_deinit_adapter(struct sss_pci_adapter *adapter); +void sss_deinit_pci_dev(struct pci_dev *pdev); + +void sss_pci_remove(struct pci_dev *pdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c new file mode 100644 index 00000000000000..9bcb32d22b2df6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_init.h" +#include "sss_pci_shutdown.h" + +void sss_pci_shutdown(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (adapter) + sss_hwdev_shutdown(adapter->hwdev); + + pci_disable_device(pdev); + + if (adapter) + sss_hwdev_stop(adapter->hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h new file mode 100644 index 00000000000000..7c9e92edda6ecb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SHUTDOWN_H +#define SSS_PCI_SHUTDOWN_H + +#include + +void sss_pci_shutdown(struct pci_dev *pdev); +#endif diff 
--git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c new file mode 100644 index 00000000000000..88fead9f65cb54 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_pci_sriov.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +#ifdef CONFIG_PCI_IOV +static int sss_init_vf_hw(void *hwdev, u16 vf_num) +{ + int ret; + u16 i; + u16 id; + + /* mbx msg channel resources will be freed during remove process */ + ret = sss_init_func_mbx_msg(hwdev, sss_get_max_vf_num(hwdev)); + if (ret != 0) + return ret; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + ret = sss_chip_set_wq_page_size(hwdev, id, SSS_DEFAULT_WQ_PAGE_SIZE); + if (ret != 0) + return ret; + } + + return 0; +} + +static void sss_deinit_vf_hw(void *hwdev, u16 vf_num) +{ + u16 i; + u16 id; + + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + sss_chip_set_wq_page_size(hwdev, id, SSS_HW_WQ_PAGE_SIZE); + } +} + +static void sss_notify_sriov_state_change(void *hwdev, u16 vf_num) +{ + struct sss_event_info event = {0}; + + event.service = SSS_EVENT_SRV_COMM; + event.type = SSS_EVENT_SRIOV_STATE_CHANGE; + + if (vf_num > 0) { + ((struct sss_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct sss_sriov_state_info *)(void *)event.event_data)->vf_num = vf_num; + } + + sss_do_event_callback(hwdev, &event); +} +#endif + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (!info->enabled) + return 0; + + if (test_and_set_bit(SSS_SRIOV_DISABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV disable in process."); + return -EPERM; + } + + if (pci_vfs_assigned(pdev) != 0) { + clear_bit(SSS_SRIOV_DISABLE, &info->state); + sdk_warn(&pdev->dev, "VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + sss_notify_sriov_state_change(hwdev, 0); + + info->enabled = false; + + pci_disable_sriov(pdev); + + sss_deinit_vf_hw(hwdev, (u16)info->vf_num); + info->vf_num = 0; + + clear_bit(SSS_SRIOV_DISABLE, &info->state); + +#endif + + return 0; +} + +#ifdef CONFIG_PCI_IOV +static int sss_check_existing_vf(struct sss_pci_adapter *adapter, u16 vf_num) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + int existing_vf = pci_num_vf(pdev); + struct sss_sriov_info *info = &adapter->sriov_info; + + if (existing_vf != 0 && existing_vf != vf_num) { + ret = sss_pci_disable_sriov(adapter); + if (ret != 0) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + } else if (existing_vf == vf_num) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return vf_num; + } + + return 0; +} +#endif + +static int sss_pci_enable_sriov(struct sss_pci_adapter *adapter, u16 vf_num) +{ +#ifdef CONFIG_PCI_IOV + int ret = 0; + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (test_and_set_bit(SSS_SRIOV_ENABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV 
disable, vf_num %d\n", vf_num); + return -EPERM; + } + + if (vf_num > pci_sriov_get_totalvfs(pdev)) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return -ERANGE; + } + + ret = sss_check_existing_vf(adapter, vf_num); + if (ret != 0) + return ret; + + ret = sss_init_vf_hw(hwdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to init vf in hw, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + ret = pci_enable_sriov(pdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable SR-IOV, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + info->enabled = true; + info->vf_num = vf_num; + + sss_notify_sriov_state_change(hwdev, vf_num); + + clear_bit(SSS_SRIOV_ENABLE, &info->state); + + return vf_num; +#else + + return 0; +#endif +} + +int sss_pci_configure_sriov(struct pci_dev *pdev, int vf_num) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return -EFAULT; + + if (!test_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state)) + return -EFAULT; + + return (vf_num == 0) ? sss_pci_disable_sriov(adapter) : + sss_pci_enable_sriov(adapter, (u16)vf_num); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h new file mode 100644 index 00000000000000..3146e8eb9f8f7d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SRIOV_H +#define SSS_PCI_SRIOV_H + +#include +#include + +#include "sss_sriov_info.h" +#include "sss_adapter.h" + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter); + +int sss_pci_configure_sriov(struct pci_dev *pdev, int num_vfs); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c new file mode 100644 index 00000000000000..96d57922821b6a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hw_wq.h" + +#define SSS_WQ_MIN_DEPTH 64 +#define SSS_WQ_MAX_DEPTH 65536 +#define SSS_WQ_MAX_PAGE_NUM (PAGE_SIZE / sizeof(u64)) + +static int sss_init_wq_block(struct sss_wq *wq) +{ + int i; + + if (SSS_WQ_IS_0_LEVEL_CLA(wq)) { + wq->block_paddr = wq->page[0].align_paddr; + wq->block_vaddr = wq->page[0].align_vaddr; + return 0; + } + + if (wq->page_num > SSS_WQ_MAX_PAGE_NUM) { + sdk_err(wq->dev_hdl, "Wq page num: 0x%x out of range: %lu\n", + wq->page_num, SSS_WQ_MAX_PAGE_NUM); + return -EFAULT; + } + + wq->block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE, + &wq->block_paddr, GFP_KERNEL); + if (!wq->block_vaddr) { + sdk_err(wq->dev_hdl, "Fail to alloc wq block vaddr\n"); + return -ENOMEM; + } + + for (i = 0; i < wq->page_num; i++) + wq->block_vaddr[i] = cpu_to_be64(wq->page[i].align_paddr); + + return 0; +} + +static void sss_deinit_wq_block(struct sss_wq *wq) +{ + if (!SSS_WQ_IS_0_LEVEL_CLA(wq)) + dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->block_vaddr, + wq->block_paddr); +} + +static int sss_alloc_wq_page(struct sss_wq *wq) +{ + int i; + int ret; + int id; + + wq->page = kcalloc(wq->page_num, sizeof(*wq->page), GFP_KERNEL); + if (!wq->page) + 
return -ENOMEM; + + for (id = 0; id < wq->page_num; id++) { + ret = sss_dma_zalloc_coherent_align(wq->dev_hdl, wq->page_size, + wq->page_size, GFP_KERNEL, &wq->page[id]); + if (ret != 0) { + sdk_err(wq->dev_hdl, "Fail to alloc wq dma page\n"); + goto dma_page_err; + } + } + + ret = sss_init_wq_block(wq); + if (ret != 0) + goto block_err; + + return 0; + +block_err: +dma_page_err: + for (i = 0; i < id; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; + + return -ENOMEM; +} + +static void sss_free_wq_page(struct sss_wq *wq) +{ + int i; + + sss_deinit_wq_block(wq); + + for (i = 0; i < wq->page_num; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; +} + +static void sss_init_wq_param(struct sss_hwdev *hwdev, struct sss_wq *wq, + u32 q_depth, u16 block_size) +{ + u32 page_size = ALIGN(hwdev->wq_page_size, PAGE_SIZE); + + wq->ci = 0; + wq->pi = 0; + wq->dev_hdl = hwdev->dev_hdl; + wq->q_depth = q_depth; + wq->id_mask = (u16)(q_depth - 1); + wq->elem_size = block_size; + wq->elem_size_shift = (u16)ilog2(wq->elem_size); + wq->page_size = page_size; + wq->elem_per_page = min(page_size / block_size, q_depth); + wq->elem_per_page_shift = (u16)ilog2(wq->elem_per_page); + wq->elem_per_page_mask = (u16)(wq->elem_per_page - 1); + wq->page_num = + (u16)(ALIGN(((u32)q_depth * block_size), page_size) / page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size) +{ + if (!wq || !hwdev) { + pr_err("Invalid wq or dev_hdl\n"); + return -EINVAL; + } + + if (q_depth < SSS_WQ_MIN_DEPTH || q_depth > SSS_WQ_MAX_DEPTH || + (q_depth & (q_depth - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid q_depth(%u)\n", q_depth); + return -EINVAL; + } + + if (block_size == 0 || (block_size & (block_size - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid block_size(%u)\n", block_size); + return -EINVAL; + } + + sss_init_wq_param(hwdev, wq, q_depth, block_size); + + return sss_alloc_wq_page(wq); +} +EXPORT_SYMBOL(sss_create_wq); + +void sss_destroy_wq(struct sss_wq *wq) +{ + if (!wq) + return; + + sss_free_wq_page(wq); +} +EXPORT_SYMBOL(sss_destroy_wq); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h new file mode 100644 index 00000000000000..073f44213a0c56 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSSNIC_NICTOOL_H +#define SSSNIC_NICTOOL_H + +#include "sss_tool_chip.h" +#include "sss_tool_sdk.h" +#include "sss_tool_sm.h" +#include "sss_tool_comm.h" + +#ifndef _LLT_TEST_ +#define SSS_TOOL_PAGE_ORDER (10) +#else +#define SSS_TOOL_PAGE_ORDER (1) +#endif + +#define SSS_TOOL_MEM_MAP_SIZE (PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)) + +#define SSS_TOOL_CARD_MAX (64) + +int sss_tool_init(void *hwdev, void *chip_node); +void sss_tool_uninit(void *hwdev, void *chip_node); + +extern u64 g_card_pa[SSS_TOOL_CARD_MAX]; +extern void *g_card_va[SSS_TOOL_CARD_MAX]; +extern int g_card_id; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c new file mode 100644 index 00000000000000..1e18f8426441d4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) 
KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_common.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq.h" +#include "sss_hwif_api.h" +#include "sss_hw_common.h" +#include "sss_mgmt_channel.h" +#include "sss_linux_kernel.h" +#include "sss_csr.h" +#include "sss_hw.h" +#include "sss_adapter.h" +#include "sss_tool.h" + +#define SSS_TOOL_DW_WIDTH 4 + +/* completion timeout interval, unit is millisecond */ +#define SSS_TOOL_UPDATE_MSG_TIMEOUT 50000U + +#define SSS_TOOL_CLP_REG_GAP 0x20 +#define SSS_TOOL_CLP_INPUT_BUF_LEN 4096UL +#define SSS_TOOL_CLP_DATA_UNIT 4UL +#define SSS_TOOL_CLP_MAX_DATA_SIZE (SSS_TOOL_CLP_INPUT_BUF_LEN / SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REQ_SIZE_OFFSET 0 +#define SSS_TOOL_CLP_RSP_SIZE_OFFSET 16 +#define SSS_TOOL_CLP_BASE_OFFSET 0 +#define SSS_TOOL_CLP_LEN_OFFSET 0 +#define SSS_TOOL_CLP_START_OFFSET 31 +#define SSS_TOOL_CLP_READY_OFFSET 31 +#define SSS_TOOL_CLP_OFFSET(member) (SSS_TOOL_CLP_##member##_OFFSET) + +#define SSS_TOOL_CLP_SIZE_MASK 0x7ffUL +#define SSS_TOOL_CLP_BASE_MASK 0x7ffffffUL +#define SSS_TOOL_CLP_LEN_MASK 0x7ffUL +#define SSS_TOOL_CLP_START_MASK 0x1UL +#define SSS_TOOL_CLP_READY_MASK 0x1UL +#define SSS_TOOL_CLP_MASK(member) (SSS_TOOL_CLP_##member##_MASK) + +#define SSS_TOOL_CLP_DELAY_CNT_MAX 200UL +#define SSS_TOOL_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define SSS_TOOL_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define SSS_TOOL_CLP_LEN_REG_MAX 0x3ff +#define SSS_TOOL_CLP_START_OR_READY_REG_MAX 0x1 + +#define SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header) \ + (((in_size) + (u16)sizeof(header) + \ + (((in_size) % SSS_TOOL_CLP_DATA_UNIT) ? SSS_TOOL_CLP_DATA_UNIT : 0)) / \ + SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REG_VALUE(value, offset, mask) \ + (((value) >> SSS_TOOL_CLP_OFFSET(offset)) & SSS_TOOL_CLP_MASK(mask)) + +enum sss_tool_clp_data_type { + SSS_TOOL_CLP_REQ = 0, + SSS_TOOL_CLP_RSP = 1 +}; + +enum sss_tool_clp_reg_type { + SSS_TOOL_CLP_BASE = 0, + SSS_TOOL_CLP_SIZE = 1, + SSS_TOOL_CLP_LEN = 2, + SSS_TOOL_CLP_START_REQ = 3, + SSS_TOOL_CLP_READY_RSP = 4 +}; + +enum SSS_TOOL_ADM_CSR_DATA_OPERATION { + SSS_TOOL_ADM_CSR_WRITE = 0x1E, + SSS_TOOL_ADM_CSR_READ = 0x1F +}; + +enum SSS_TOOL_ADM_CSR_NEED_RESP_DATA { + SSS_TOOL_ADM_CSR_NO_RESP_DATA = 0, + SSS_TOOL_ADM_CSR_NEED_RESP_DATA = 1 +}; + +enum SSS_TOOL_ADM_CSR_DATA_SIZE { + SSS_TOOL_ADM_CSR_DATA_SZ_32 = 0, + SSS_TOOL_ADM_CSR_DATA_SZ_64 = 1 +}; + +struct sss_tool_csr_request_adm_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. 
*/ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. */ + u32 csr_write_data_l; +}; + +struct sss_tool_csr_read { + u32 rd_len; + u32 addr; +}; + +struct sss_tool_csr_write { + u32 rd_len; + u32 addr; + u8 *data; +}; + +static u32 sss_tool_get_timeout_val(enum sss_mod_type mod, u16 cmd) +{ + if (mod == SSS_MOD_TYPE_COMM && + (cmd == SSS_COMM_MGMT_CMD_UPDATE_FW || + cmd == SSS_COMM_MGMT_CMD_UPDATE_BIOS || + cmd == SSS_COMM_MGMT_CMD_ACTIVE_FW || + cmd == SSS_COMM_MGMT_CMD_SWITCH_CFG || + cmd == SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW)) + return SSS_TOOL_UPDATE_MSG_TIMEOUT; + + return 0; /* use default mbox/adm timeout time */ +} + +static int sss_tool_get_clp_reg(void *hwdev, enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type type, u32 *addr) +{ + switch (type) { + case SSS_TOOL_CLP_BASE: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQBASE) : SSS_CLP_REG(RSPBASE); + break; + + case SSS_TOOL_CLP_SIZE: + *addr = SSS_CLP_REG(SIZE); + break; + + case SSS_TOOL_CLP_LEN: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQ) : SSS_CLP_REG(RSP); + break; + + case SSS_TOOL_CLP_START_REQ: + *addr = SSS_CLP_REG(REQ); + break; + + case SSS_TOOL_CLP_READY_RSP: + *addr = SSS_CLP_REG(RSP); + break; + + default: + *addr = 0; + break; + } + + return (*addr == 0) ? -EINVAL : 0; +} + +static inline int sss_tool_clp_param_valid(enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type) +{ + if (data_type == SSS_TOOL_CLP_REQ && reg_type == SSS_TOOL_CLP_READY_RSP) + return -EINVAL; + + if (data_type == SSS_TOOL_CLP_RSP && reg_type == SSS_TOOL_CLP_START_REQ) + return -EINVAL; + + return 0; +} + +static u32 sss_tool_get_clp_reg_value(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_BASE: + value = SSS_TOOL_CLP_REG_VALUE(value, BASE, BASE); + break; + + case SSS_TOOL_CLP_SIZE: + if (data_type == SSS_TOOL_CLP_REQ) + value = SSS_TOOL_CLP_REG_VALUE(value, REQ_SIZE, SIZE); + else + value = SSS_TOOL_CLP_REG_VALUE(value, RSP_SIZE, SIZE); + break; + + case SSS_TOOL_CLP_LEN: + value = SSS_TOOL_CLP_REG_VALUE(value, LEN, LEN); + break; + + case SSS_TOOL_CLP_START_REQ: + value = SSS_TOOL_CLP_REG_VALUE(value, START, START); + break; + + case SSS_TOOL_CLP_READY_RSP: + value = SSS_TOOL_CLP_REG_VALUE(value, READY, READY); + break; + + default: + break; + } + + return value; +} + +static int sss_tool_read_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int ret; + + ret = sss_tool_clp_param_valid(data_type, reg_type); + if (ret) + return ret; + + ret = sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (ret) + return ret; + + *read_value = sss_tool_get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int sss_tool_check_reg_value(enum sss_tool_clp_reg_type reg_type, u32 value) +{ + if (reg_type == SSS_TOOL_CLP_BASE && + value > SSS_TOOL_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == SSS_TOOL_CLP_SIZE && + value > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == SSS_TOOL_CLP_LEN && + value > SSS_TOOL_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type 
== SSS_TOOL_CLP_START_REQ || + reg_type == SSS_TOOL_CLP_READY_RSP) && + value > SSS_TOOL_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static int sss_tool_check_clp_init_status(struct sss_hwdev *hwdev) +{ + int ret; + u32 reg_value = 0; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read clp reg: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp ba value: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read req size\n"); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_write_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (sss_tool_clp_param_valid(data_type, reg_type)) + return; + + if (sss_tool_check_reg_value(reg_type, value)) + return; + + if (sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_LEN: + reg_value &= (~(SSS_TOOL_CLP_MASK(LEN) << SSS_TOOL_CLP_OFFSET(LEN))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(LEN)); + break; + + case SSS_TOOL_CLP_START_REQ: + reg_value &= (~(SSS_TOOL_CLP_MASK(START) << SSS_TOOL_CLP_OFFSET(START))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(START)); + break; + + case SSS_TOOL_CLP_READY_RSP: + reg_value &= (~(SSS_TOOL_CLP_MASK(READY) << SSS_TOOL_CLP_OFFSET(READY))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(READY)); + break; + + default: + return; + } + + sss_chip_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int sss_tool_read_clp_data(struct sss_hwdev *hwdev, void *buf_out, u16 *out_size) +{ + int err; + u32 reg = SSS_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) { + tool_err("Fail to read clp delay rsp, timeout delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_LEN, &temp_out_size); + if (err) + return err; + + if (temp_out_size > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + tool_err("Invalid temp out size: %u\n", temp_out_size); + return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = sss_chip_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, (u32)0x0); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_LEN, (u32)0x0); + + return 0; +} + +static int sss_tool_write_clp_data(struct sss_hwdev *hwdev, void *buf_in, u16 in_size) +{ + int ret; + u32 reg = 
SSS_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + u16 size_in = in_size; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret != 0) + return ret; + + while (start == 1) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_LEN, size_in); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_START_REQ, (u32)0x1); + + for (; size_in > 0; size_in--) { + sss_chip_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + sizeof(u32); + } + + return 0; +} + +static void sss_tool_clear_clp_data(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type) +{ + u32 reg = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_DATA(REQ) : SSS_CLP_DATA(RSP); + u32 count = SSS_TOOL_CLP_MAX_DATA_SIZE; + + for (; count > 0; count--) { + sss_chip_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + sizeof(u32); + } +} + +static void sss_tool_clp_prepare_header(struct sss_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, enum sss_mgmt_cmd cmd) +{ + struct sss_hwif *hwif = hwdev->hwif; + + *header = SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | + SSS_SET_MSG_HEADER(mod, MODULE) | + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | + SSS_SET_MSG_HEADER(0, NO_ACK) | + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | + SSS_SET_MSG_HEADER(0, SEQID) | + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | + SSS_SET_MSG_HEADER(0, DIRECTION) | + SSS_SET_MSG_HEADER(cmd, CMD) | + SSS_SET_MSG_HEADER(hwif->attr.func_id, SRC_GLB_FUNC_ID) | + SSS_SET_MSG_HEADER(0, MSG_ID); +} + +static int sss_tool_send_clp_msg(struct sss_hwdev *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + struct sss_clp_pf_to_mgmt *clp_msg; + u64 header; + u16 size; + u8 *msg_buf; + int ret; + + if (!hwdev || SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) + return -EINVAL; + + if (!hwdev->chip_present_flag || !SSS_SUPPORT_CLP(hwdev)) + return -EPERM; + + clp_msg = hwdev->clp_pf_to_mgmt; + if (!clp_msg) + return -EPERM; + + msg_buf = clp_msg->clp_msg_buf; + + /* 4 bytes alignment */ + size = SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header); + + if (size > SSS_TOOL_CLP_MAX_DATA_SIZE) { + tool_err("Invalid data size: %u\n", size); + return -EINVAL; + } + down(&clp_msg->clp_msg_lock); + + ret = sss_tool_check_clp_init_status(hwdev); + if (ret) { + tool_err("Fail to check clp init status\n"); + up(&clp_msg->clp_msg_lock); + return ret; + } + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, 0x0); + + /* Send request */ + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + sss_tool_clp_prepare_header(hwdev, &header, in_size, mod, cmd); + + memcpy(msg_buf, &header, sizeof(header)); + msg_buf += sizeof(header); + memcpy(msg_buf, buf_in, in_size); + + msg_buf = clp_msg->clp_msg_buf; + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_REQ); + ret = sss_tool_write_clp_data(hwdev, clp_msg->clp_msg_buf, size); + if (ret) { + tool_err("Fail to send clp request\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + msg_buf = clp_msg->clp_msg_buf; + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + ret = sss_tool_read_clp_data(hwdev, msg_buf, &size); + 
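+	/* Clear the response area unconditionally so stale data cannot be
+	 * picked up by the next CLP exchange, then check the read result.
+	 */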
sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + if (ret) { + tool_err("Fail to read clp response\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + size = (u16)((size * SSS_TOOL_CLP_DATA_UNIT) & 0xffff); + if (size <= sizeof(header) || size > SSS_TOOL_CLP_INPUT_BUF_LEN) { + tool_err("Invalid response size: %u", size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + if (size != *out_size + sizeof(header)) { + tool_err("Invalid size:%u, out_size: %u\n", size, *out_size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (msg_buf + sizeof(header)), size); + up(&clp_msg->clp_msg_lock); + + return 0; +} + +static int sss_tool_adm_csr_rd32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 *val) +{ + int ret; + u32 csr_val = 0; + struct sss_tool_csr_request_adm_data adm_data = {0}; + + if (!hwdev || !val) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw0 = 0; + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_READ; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NEED_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + + ret = sss_adm_msg_read_ack(hwdev, dest, (u8 *)(&adm_data), + sizeof(adm_data), &csr_val, 0x4); + if (ret) { + tool_err("Fail to read 32 bit csr, dest %u addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +static int sss_tool_adm_csr_wr32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 val) +{ + int ret; + struct sss_tool_csr_request_adm_data adm_data = {0}; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_WRITE; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NO_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + adm_data.csr_write_data_h = 0xffffffff; + adm_data.csr_write_data_l = val; + + ret = sss_adm_msg_write_nack(hwdev, dest, (u8 *)(&adm_data), sizeof(adm_data)); + if (ret) { + tool_err("Fail to write 32 bit csr! 
dest %u addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + +static int sss_tool_adm_csr_read(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_read *rd_msg = (struct sss_tool_csr_read *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = rd_msg->rd_len; + u32 rd_addr = rd_msg->addr; + + if (!buf_in || !buf_out || in_size != sizeof(*rd_msg) || + *out_size != rd_len || rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_rd32(hwdev, node_id, rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + tool_err("Fail to read csr, err: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + *out_size = rd_len; + + return ret; +} + +static int sss_tool_adm_csr_write(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_write *wr_msg = (struct sss_tool_csr_write *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = wr_msg->rd_len; + u32 rd_addr = wr_msg->addr; + u8 *data = NULL; + + if (!buf_in || in_size != sizeof(*wr_msg) || + wr_msg->rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) + return -EFAULT; + + if (copy_from_user(data, (void *)wr_msg->data, rd_len)) { + tool_err("Fail to copy information from user\n"); + kfree(data); + return -EFAULT; + } + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_wr32(hwdev, node_id, rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + tool_err("Fail to write csr, ret: %d, node_id: %u, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + + *out_size = 0; + kfree(data); + return ret; +} + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u16 cmd = tool_msg->mpu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->mpu_cmd.mod; + u32 timeout = sss_tool_get_timeout_val(mod, cmd); + void *hwdev = hal_dev->hwdev; + + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX || + tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_CLP) { + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX) { + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + } else { + ret = sss_tool_send_clp_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + } + + if (ret) { + tool_err("Fail to send msg to mgmt cpu, mod: %d, cmd: %u\n", mod, cmd); + return ret; + } + + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_BYPASS) { + if (tool_msg->mpu_cmd.cmd == SSS_TOOL_ADM_MSG_WRITE) + return sss_tool_adm_csr_write(hwdev, tool_msg, buf_in, in_size, + buf_out, out_size); + + ret = sss_tool_adm_csr_read(hwdev, tool_msg, buf_in, in_size, buf_out, out_size); + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU) { + if (SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)) != SSS_SPU_HOST_ID) + ret = sss_sync_send_adm_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, 
timeout); + else + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + + if (ret) { + tool_err("Fail to send adm msg to mgmt cpu, mod: %d, cmd: %u\n", + mod, cmd); + return ret; + } + + } else { + tool_err("Invalid channel %d\n", tool_msg->mpu_cmd.channel); + return -EINVAL; + } + + return ret; +} + +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u8 cmd = tool_msg->npu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->npu_cmd.mod; + + if (tool_msg->npu_cmd.direct_resp) { + ret = sss_ctrlq_direct_reply(hal_dev->hwdev, mod, cmd, buf_in, + buf_out, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send direct ctrlq, ret: %d\n", ret); + } else { + ret = sss_ctrlq_sync_cmd_detail_reply(hal_dev->hwdev, mod, cmd, buf_in, buf_out, + NULL, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send detail ctrlq, ret: %d\n", ret); + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h new file mode 100644 index 00000000000000..4dbaed192f85d0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_CHIP_H +#define SSS_TOOL_CHIP_H +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h new file mode 100644 index 00000000000000..b951026a7c9c5b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_HW_H +#define SSS_TOOL_HW_H + +#define SSS_TOOL_CMD_TYPE (0x18) + +#define SSS_TOOL_PF_DEV_MAX 32 +/* Indicates the maximum number of interrupts that can be recorded. + * Subsequent interrupts are not recorded in FFM. + */ +#define SSS_TOOL_FFM_RECORD_MAX 64 + +#define SSS_TOOL_PF_INFO_MAX (16) +#define SSS_TOOL_BUSINFO_LEN (32) + +#define SSS_TOOL_CHIP_FAULT_SIZE (110 * 1024) +#define SSS_TOOL_DRV_BUF_SIZE_MAX 4096 + +/* dbgtool command type */ +/* You can add commands as required. The dbgtool command can be + * used to invoke all interfaces of the kernel-mode x86 driver. 
+ */ +enum sss_tool_dbg_cmd { + SSS_TOOL_DBG_CMD_API_RD = 0, + SSS_TOOL_DBG_CMD_API_WR, + SSS_TOOL_DBG_CMD_FFM_RD, + SSS_TOOL_DBG_CMD_FFM_CLR, + SSS_TOOL_DBG_CMD_PF_DEV_INFO_GET, + SSS_TOOL_DBG_CMD_MSG_2_UP, + SSS_TOOL_DBG_CMD_FREE_MEM, + SSS_TOOL_DBG_CMD_NUM +}; + +enum module_name { + SSS_TOOL_MSG_TO_NPU = 1, + SSS_TOOL_MSG_TO_MPU, + SSS_TOOL_MSG_TO_SM, + SSS_TOOL_MSG_TO_HW_DRIVER, +#define SSS_TOOL_MSG_TO_SRV_DRV_BASE (SSS_TOOL_MSG_TO_HW_DRIVER + 1) + SSS_TOOL_MSG_TO_NIC_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE, + SSS_TOOL_MSG_TO_OVS_DRIVER, + SSS_TOOL_MSG_TO_ROCE_DRIVER, + SSS_TOOL_MSG_TO_TOE_DRIVER, + SSS_TOOL_MSG_TO_IOE_DRIVER, + SSS_TOOL_MSG_TO_FC_DRIVER, + SSS_TOOL_MSG_TO_VBS_DRIVER, + SSS_TOOL_MSG_TO_IPSEC_DRIVER, + SSS_TOOL_MSG_TO_VIRTIO_DRIVER, + SSS_TOOL_MSG_TO_MIGRATE_DRIVER, + SSS_TOOL_MSG_TO_PPA_DRIVER, + SSS_TOOL_MSG_TO_CUSTOM_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 11, + SSS_TOOL_MSG_TO_DRIVER_MAX = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 15, /* reserved */ +}; + +enum sss_tool_adm_msg_type { + SSS_TOOL_ADM_MSG_READ, + SSS_TOOL_ADM_MSG_WRITE +}; + +enum sss_tool_sm_cmd_type { + SSS_TOOL_SM_CMD_RD16 = 1, + SSS_TOOL_SM_CMD_RD32, + SSS_TOOL_SM_CMD_RD64_PAIR, + SSS_TOOL_SM_CMD_RD64, + SSS_TOOL_SM_CMD_RD32_CLEAR, + SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, + SSS_TOOL_SM_CMD_RD64_CLEAR +}; + +enum sss_tool_channel_type { + SSS_TOOL_CHANNEL_MBOX = 1, + SSS_TOOL_CHANNEL_ADM_MSG_BYPASS, + SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU, + SSS_TOOL_CHANNEL_CLP, +}; + +struct sss_tool_api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct sss_tool_api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct sss_tool_pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +struct sss_tool_ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +struct sss_tool_ffm_intr_tm_info { + struct sss_tool_ffm_intr_info intr_info; + u8 times; + u8 sec; + u8 min; + u8 hour; + u8 mday; + u8 mon; + u16 year; +}; + +struct sss_tool_ffm_record_info { + u32 ffm_num; + u32 last_err_csr_addr; + u32 last_err_csr_value; + struct sss_tool_ffm_intr_tm_info ffm[SSS_TOOL_FFM_RECORD_MAX]; +}; + +struct sss_tool_knl_dbg_info { + struct semaphore dbgtool_sem; + struct sss_tool_ffm_record_info *ffm; +}; + +struct sss_tool_msg_to_up { + u8 pf_id; + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct sss_tool_dbg_param { + union { + struct sss_tool_api_cmd_rd api_rd; + struct sss_tool_api_cmd_wr api_wr; + struct sss_tool_pf_dev_info *dev_info; + struct sss_tool_ffm_record_info *ffm_rd; + struct sss_tool_msg_to_up msg2up; + } param; + char chip_name[16]; +}; + +struct sss_tool_pf { + char name[IFNAMSIZ]; + char bus_info[SSS_TOOL_BUSINFO_LEN]; + u32 pf_type; +}; + +struct sss_tool_card_info { + struct sss_tool_pf pf[SSS_TOOL_PF_INFO_MAX]; + u32 pf_num; +}; + +struct sss_tool_pf_info { + u32 valid; + u32 pf_id; +}; + +struct sss_tool_cmd_chip_fault_stats { + u32 offset; + u8 chip_fault_stats[SSS_TOOL_DRV_BUF_SIZE_MAX]; +}; + +struct sss_tool_npu_msg { + u32 mod : 8; + u32 cmd : 8; + u32 ack_type : 3; + u32 direct_resp : 1; + u32 len : 12; +}; + +struct sss_tool_mpu_msg { + u32 channel : 8; + u32 mod : 8; + u32 cmd : 16; +}; + +struct sss_tool_msg { + char device_name[IFNAMSIZ]; + u32 module; + union { + u32 msg_formate; /* for driver */ + struct sss_tool_npu_msg npu_cmd; + 
struct sss_tool_mpu_msg mpu_cmd; + }; + u32 timeout; /* for mpu/npu cmd */ + u32 func_id; + u32 buf_in_size; + u32 buf_out_size; + void *in_buf; + void *out_buf; + int bus_num; + u8 port_id; + u8 rsvd1[3]; + u32 rsvd2[4]; +}; + +#endif /* SSS_TOOL_HW_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c new file mode 100644 index 00000000000000..8173d76cf7f55f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_adapter_mgmt.h" +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#define SSS_TOOL_DEV_PATH "/dev/sssnic_nictool_dev" +#define SSS_TOOL_DEV_CLASS "sssnic_nictool_class" +#define SSS_TOOL_DEV_NAME "sssnic_nictool_dev" + +#define SSS_TOOL_CTRLQ_BUF_SIZE_MAX 2048U +#define SSS_TOOL_MSG_IN_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_MSG_OUT_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_BUF_SIZE_MAX (2048 * 1024) + +typedef int (*sss_tool_deal_handler_fun)(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +struct sss_tool_deal_handler { + enum module_name msg_name; + sss_tool_deal_handler_fun func; +}; + +static int g_nictool_ref_cnt; + +static dev_t g_dev_id = {0}; + +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static void *g_card_node_array[SSS_TOOL_CARD_MAX] = {0}; +void *g_card_va[SSS_TOOL_CARD_MAX] = {0}; +u64 g_card_pa[SSS_TOOL_CARD_MAX] = {0}; +int g_card_id; + +static int sss_tool_msg_to_nic(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = -EINVAL; + void *uld_dev = NULL; + enum sss_service_type service_type; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + service_type = tool_msg->module - SSS_TOOL_MSG_TO_SRV_DRV_BASE; + if (service_type >= SSS_SERVICE_TYPE_MAX) { + tool_err("Invalid input module id: %u\n", tool_msg->module); + return -EINVAL; + } + + uld_dev = sss_get_uld_dev(hal_dev, service_type); + if (!uld_dev) { + if (tool_msg->msg_formate == SSS_TOOL_GET_DRV_VERSION) + return 0; + + tool_err("Fail to get uld device\n"); + return -EINVAL; + } + + if (uld_info[service_type].ioctl) + ret = uld_info[service_type].ioctl(uld_dev, tool_msg->msg_formate, + in_buf, in_len, out_buf, out_len); + sss_uld_dev_put(hal_dev, service_type); + + return ret; +} + +static void sss_tool_free_in_buf(void *hwdev, const struct sss_tool_msg *tool_msg, void *in_buf) +{ + if (!in_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) + sss_free_ctrlq_msg_buf(hwdev, in_buf); + else + kfree(in_buf); +} + +static void sss_tool_free_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *out_buf) +{ + if (!out_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) + sss_free_ctrlq_msg_buf(hwdev, out_buf); + else + kfree(out_buf); +} + +static int sss_tool_alloc_in_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 in_len, void **in_buf) +{ + void *msg_buf = NULL; + + if (!in_len) + return 0; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) { + struct sss_ctrl_msg_buf *cmd_buf = NULL; + + if (in_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + 
tool_err("Invalid ctrlq in len(%u) more than %u\n", + in_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + cmd_buf = sss_alloc_ctrlq_msg_buf(hwdev); + if (!cmd_buf) { + tool_err("Fail to alloc ctrlq msg buf\n"); + return -ENOMEM; + } + *in_buf = (void *)cmd_buf; + cmd_buf->size = (u16)in_len; + } else { + if (in_len > SSS_TOOL_MSG_IN_SIZE_MAX) { + tool_err("Invalid in len(%u) more than %u\n", + in_len, SSS_TOOL_MSG_IN_SIZE_MAX); + return -ENOMEM; + } + msg_buf = kzalloc(in_len, GFP_KERNEL); + *in_buf = msg_buf; + } + + if (!(*in_buf)) { + tool_err("Fail to alloc in buf\n"); + return -ENOMEM; + } + + return 0; +} + +static int sss_tool_alloc_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 out_len, void **out_buf) +{ + if (!out_len) { + tool_info("out len is 0, need not alloc buf\n"); + return 0; + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) { + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (out_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + tool_err("Invalid ctrlq out len(%u) more than %u\n", + out_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + msg_buf = sss_alloc_ctrlq_msg_buf(hwdev); + *out_buf = (void *)msg_buf; + } else { + if (out_len > SSS_TOOL_MSG_OUT_SIZE_MAX) { + tool_err("Invalid out len(%u) more than %u\n", + out_len, SSS_TOOL_MSG_OUT_SIZE_MAX); + return -ENOMEM; + } + *out_buf = kzalloc(out_len, GFP_KERNEL); + } + if (!(*out_buf)) { + tool_err("Fail to alloc out buf\n"); + return -ENOMEM; + } + + return 0; +} + +static int sss_tool_copy_to_user(struct sss_tool_msg *tool_msg, + u32 out_len, void *out_buf) +{ + void *out_msg = NULL; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && !tool_msg->npu_cmd.direct_resp) { + out_msg = ((struct sss_ctrl_msg_buf *)out_buf)->buf; + if (copy_to_user(tool_msg->out_buf, out_msg, out_len)) + return -EFAULT; + return 0; + } + + if (copy_to_user(tool_msg->out_buf, out_buf, out_len)) + return -EFAULT; + + return 0; +} + +static int sss_tool_alloc_buf(void *hwdev, struct sss_tool_msg *tool_msg, u32 in_len, + void **in_buf, u32 out_len, void **out_buf) +{ + int ret; + + ret = sss_tool_alloc_in_buf(hwdev, tool_msg, in_len, in_buf); + if (ret) { + tool_err("Fail to alloc tool msg in buf\n"); + return ret; + } + + if (copy_from_user(*in_buf, tool_msg->in_buf, in_len)) { + tool_err("Fail to copy tool_msg to in buf\n"); + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + return -EFAULT; + } + + ret = sss_tool_alloc_out_buf(hwdev, tool_msg, out_len, out_buf); + if (ret) { + tool_err("Fail to alloc tool msg out buf\n"); + goto alloc_out_buf_err; + } + + return 0; + +alloc_out_buf_err: + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + + return ret; +} + +static void sss_tool_free_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *in_buf, void *out_buf) +{ + sss_tool_free_out_buf(hwdev, tool_msg, out_buf); + sss_tool_free_in_buf(hwdev, tool_msg, in_buf); +} + +const struct sss_tool_deal_handler g_deal_msg_handle[] = { + {SSS_TOOL_MSG_TO_NPU, sss_tool_msg_to_npu}, + {SSS_TOOL_MSG_TO_MPU, sss_tool_msg_to_mpu}, + {SSS_TOOL_MSG_TO_SM, sss_tool_msg_to_sm}, + {SSS_TOOL_MSG_TO_HW_DRIVER, sss_tool_msg_to_hw}, + {SSS_TOOL_MSG_TO_NIC_DRIVER, sss_tool_msg_to_nic} +}; + +static int sss_tool_deal_cmd(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = 0; + int index; + int msg_num = ARRAY_LEN(g_deal_msg_handle); + + for (index = 0; index < msg_num; index++) { + if (tool_msg->module != 
g_deal_msg_handle[index].msg_name) + continue; + + ret = g_deal_msg_handle[index].func(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + break; + } + + if (index == msg_num) + ret = sss_tool_msg_to_nic(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + + return ret; +} + +static struct sss_hal_dev *sss_tool_get_hal_dev_by_msg(struct sss_tool_msg *tool_msg) +{ + struct sss_hal_dev *hal_dev = NULL; + + if (tool_msg->module >= SSS_TOOL_MSG_TO_SRV_DRV_BASE && + tool_msg->module < SSS_TOOL_MSG_TO_DRIVER_MAX && + tool_msg->msg_formate != SSS_TOOL_GET_DRV_VERSION) { + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + tool_msg->module - + SSS_TOOL_MSG_TO_SRV_DRV_BASE); + } else { + hal_dev = sss_get_lld_dev_by_chip_name(tool_msg->device_name); + if (!hal_dev) + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + SSS_SERVICE_TYPE_MAX); + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NIC_DRIVER && + (tool_msg->msg_formate == SSS_TOOL_GET_XSFP_INFO || + tool_msg->msg_formate == SSS_TOOL_GET_XSFP_PRESENT)) + hal_dev = sss_get_lld_dev_by_chip_and_port(tool_msg->device_name, + tool_msg->port_id); + + return hal_dev; +} + +static int sss_tool_check_msg_valid(struct sss_tool_msg *tool_msg) +{ + if (tool_msg->buf_out_size > SSS_TOOL_BUF_SIZE_MAX || + tool_msg->buf_in_size > SSS_TOOL_BUF_SIZE_MAX) { + tool_err("Invalid in buf len: %u or out buf len: %u\n", + tool_msg->buf_in_size, tool_msg->buf_out_size); + return -EFAULT; + } + + return 0; +} + +static long sss_tool_msg_ioctl(unsigned long arg) +{ + int ret = 0; + u32 in_len = 0; + u32 expect_out_len = 0; + u32 out_len = 0; + void *in_buf = NULL; + void *out_buf = NULL; + struct sss_hal_dev *hal_dev = NULL; + struct sss_tool_msg tool_msg = {0}; + + if (copy_from_user(&tool_msg, (void *)arg, sizeof(tool_msg))) { + tool_err("Fail to copy msg from user space\n"); + return -EFAULT; + } + + if (sss_tool_check_msg_valid(&tool_msg)) { + tool_err("Fail to check msg valid\n"); + return -EFAULT; + } + + tool_msg.device_name[IFNAMSIZ - 1] = '\0'; + expect_out_len = tool_msg.buf_out_size; + in_len = tool_msg.buf_in_size; + + hal_dev = sss_tool_get_hal_dev_by_msg(&tool_msg); + if (!hal_dev) { + if (tool_msg.msg_formate != SSS_TOOL_DEV_NAME_TEST) + tool_err("Fail to find device %s for module %d\n", + tool_msg.device_name, tool_msg.module); + return -ENODEV; + } + + if (tool_msg.msg_formate == SSS_TOOL_DEV_NAME_TEST) + return 0; + + ret = sss_tool_alloc_buf(hal_dev->hwdev, &tool_msg, + in_len, &in_buf, expect_out_len, &out_buf); + if (ret) { + tool_err("Fail to alloc cmd buf\n"); + goto out_free_lock; + } + + out_len = expect_out_len; + + ret = sss_tool_deal_cmd(hal_dev, &tool_msg, in_buf, in_len, out_buf, &out_len); + if (ret) { + tool_err("Fail to execute cmd, module: %u, ret: %d.\n", tool_msg.module, ret); + goto out_free_buf; + } + + if (out_len > expect_out_len) { + ret = -EFAULT; + tool_err("Fail to execute cmd, expected out len from user: %u, out len: %u\n", + expect_out_len, out_len); + goto out_free_buf; + } + + ret = sss_tool_copy_to_user(&tool_msg, out_len, out_buf); + if (ret) + tool_err("Fail to copy return information to user space\n"); + +out_free_buf: + sss_tool_free_buf(hal_dev->hwdev, &tool_msg, in_buf, out_buf); + +out_free_lock: + lld_dev_put(hal_dev); + return (long)ret; +} + +static long sss_tool_knl_ffm_info_rd(struct sss_tool_dbg_param *dbg_param, + struct sss_tool_knl_dbg_info *dbg_info) +{ + if (copy_to_user(dbg_param->param.ffm_rd, dbg_info->ffm, + (unsigned int)sizeof(*dbg_param->param.ffm_rd))) { + 
tool_err("Fail to copy ffm_info to user space\n"); + return -EFAULT; + } + + return 0; +} + +static struct sss_card_node *sss_tool_find_card_node(char *chip_name) +{ + int i; + struct sss_card_node *card_node = NULL; + + for (i = 0; i < SSS_TOOL_CARD_MAX; i++) { + card_node = (struct sss_card_node *)g_card_node_array[i]; + if (!card_node) + continue; + if (!strncmp(chip_name, card_node->chip_name, IFNAMSIZ)) + break; + } + if (i == SSS_TOOL_CARD_MAX || !card_node) + return NULL; + + g_card_id = i; + + return card_node; +} + +static long sss_tool_dbg_ioctl(unsigned int cmd_type, unsigned long arg) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + struct sss_card_node *card_node = NULL; + struct sss_tool_dbg_param param = {0}; + long ret; + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + tool_err("Fail to copy msg param from user\n"); + return -EFAULT; + } + + sss_hold_chip_node(); + + card_node = sss_tool_find_card_node(param.chip_name); + if (!card_node) { + sss_put_chip_node(); + tool_err("Fail to find card node %s\n", param.chip_name); + return -EFAULT; + } + + dbg_info = (struct sss_tool_knl_dbg_info *)card_node->dbgtool_info; + + down(&dbg_info->dbgtool_sem); + + if (cmd_type == SSS_TOOL_DBG_CMD_FFM_RD) { + ret = sss_tool_knl_ffm_info_rd(¶m, dbg_info); + } else if (cmd_type == SSS_TOOL_DBG_CMD_MSG_2_UP) { + tool_info("cmd(0x%x) not suppose.\n", cmd_type); + ret = 0; + } else { + tool_err("Fail to execute cmd(0x%x) ,it is not support\n", cmd_type); + ret = -EFAULT; + } + + up(&dbg_info->dbgtool_sem); + + sss_put_chip_node(); + + return ret; +} + +static int sss_tool_release(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static int sss_tool_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t sss_tool_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t sss_tool_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static long sss_tool_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + unsigned int cmd_type = _IOC_NR(cmd); + + if (cmd_type == SSS_TOOL_CMD_TYPE) + return sss_tool_msg_ioctl(arg); + + return sss_tool_dbg_ioctl(cmd_type, arg); +} + +static int sss_tool_mem_mmap(struct file *filp, struct vm_area_struct *mem_area) +{ + unsigned long mem_size = mem_area->vm_end - mem_area->vm_start; + phys_addr_t offset = (phys_addr_t)mem_area->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (mem_size > SSS_TOOL_MEM_MAP_SIZE) { + tool_err("Fail to map mem, mem_size :%ld, alloc size: %ld\n", + mem_size, SSS_TOOL_MEM_MAP_SIZE); + return -EAGAIN; + } + + phy_addr = offset ? 
offset : g_card_pa[g_card_id]; + if (!phy_addr) { + tool_err("Fail to map mem, card_id = %d phy_addr is 0\n", g_card_id); + return -EAGAIN; + } + + mem_area->vm_page_prot = pgprot_noncached(mem_area->vm_page_prot); + if (remap_pfn_range(mem_area, mem_area->vm_start, (phy_addr >> PAGE_SHIFT), + mem_size, mem_area->vm_page_prot)) { + tool_err("Fail to remap pfn range.\n"); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations sss_tool_file_ops = { + .owner = THIS_MODULE, + .release = sss_tool_release, + .open = sss_tool_open, + .read = sss_tool_read, + .write = sss_tool_write, + .unlocked_ioctl = sss_tool_unlocked_ioctl, + .mmap = sss_tool_mem_mmap, +}; + +static struct sss_tool_knl_dbg_info *sss_tool_alloc_dbg_info(void *hwdev) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + + dbg_info = (struct sss_tool_knl_dbg_info *) + kzalloc(sizeof(struct sss_tool_knl_dbg_info), GFP_KERNEL); + if (!dbg_info) + return NULL; + + dbg_info->ffm = (struct sss_tool_ffm_record_info *) + kzalloc(sizeof(*dbg_info->ffm), GFP_KERNEL); + if (!dbg_info->ffm) { + tool_err("Fail to alloc ffm_record_info\n"); + kfree(dbg_info); + return NULL; + } + + return dbg_info; +} + +static void sss_tool_free_dbg_info(struct sss_tool_knl_dbg_info *dbg_info) +{ + kfree(dbg_info->ffm); + kfree(dbg_info); +} + +static int sss_tool_get_node_id(struct sss_card_node *card_node, int *node_id) +{ + int ret; + + ret = sscanf(card_node->chip_name, SSS_CHIP_NAME "%d", node_id); + if (ret < 0) { + tool_err("Fail to get card id\n"); + return -ENOMEM; + } + + return 0; +} + +static int sss_tool_add_func_to_card_node(void *hwdev, struct sss_card_node *card_node) +{ + int func_id = sss_get_func_id(hwdev); + struct sss_tool_knl_dbg_info *dbg_info = NULL; + int ret; + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = hwdev; + + if (card_node->func_num++) + return 0; + + dbg_info = sss_tool_alloc_dbg_info(hwdev); + if (!dbg_info) { + ret = -ENOMEM; + tool_err("Fail to alloc dbg_info\n"); + goto alloc_dbg_info_err; + } + card_node->dbgtool_info = dbg_info; + sema_init(&dbg_info->dbgtool_sem, 1); + + ret = sss_tool_get_node_id(card_node, &node_id); + if (ret) { + tool_err("Fail to add node to global array\n"); + goto get_node_id_err; + } + g_card_node_array[node_id] = card_node; + + return 0; + +get_node_id_err: + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + +alloc_dbg_info_err: + card_node->func_num--; + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + return ret; +} + +static void sss_tool_del_func_in_card_node(void *hwdev, struct sss_card_node *card_node) +{ + struct sss_tool_knl_dbg_info *dbg_info = card_node->dbgtool_info; + int func_id = sss_get_func_id(hwdev); + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + if (--card_node->func_num) + return; + + sss_tool_get_node_id(card_node, &node_id); + if (node_id < SSS_TOOL_CARD_MAX) + g_card_node_array[node_id] = NULL; + + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + + if (node_id < SSS_TOOL_CARD_MAX) + (void)sss_tool_free_card_mem(node_id); +} + +static int sss_tool_create_dev(void) +{ + int ret; + struct device *pdevice = NULL; + + ret = alloc_chrdev_region(&g_dev_id, 0, 1, SSS_TOOL_DEV_NAME); + if (ret) { + tool_err("Fail to alloc sssnic_nictool_dev region(0x%x)\n", ret); + return ret; + } + +#ifdef CLASS_CREATE_WITH_ONE_PARAM + 
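+	/* Newer kernels drop the owner argument from class_create(); the
+	 * CLASS_CREATE_WITH_ONE_PARAM compat switch selects the matching call.
+	 */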
g_nictool_class = class_create(SSS_TOOL_DEV_CLASS); +#else + g_nictool_class = class_create(THIS_MODULE, SSS_TOOL_DEV_CLASS); +#endif + if (IS_ERR(g_nictool_class)) { + tool_err("Fail to create sssnic_nictool_class\n"); + ret = -EFAULT; + goto create_class_err; + } + + cdev_init(&g_nictool_cdev, &sss_tool_file_ops); + + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + tool_err("Fail to add sssnic_nictool_dev to operating system (0x%x)\n", ret); + goto add_cdev_err; + } + + pdevice = device_create(g_nictool_class, NULL, g_dev_id, NULL, SSS_TOOL_DEV_NAME); + if (IS_ERR(pdevice)) { + tool_err("Fail to create sssnic_nictool_dev on operating system\n"); + ret = -EFAULT; + goto create_device_err; + } + + tool_info("Success to register sssnic_nictool_dev to system\n"); + + return 0; + +create_device_err: + cdev_del(&g_nictool_cdev); + +add_cdev_err: + class_destroy(g_nictool_class); + +create_class_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +static void sss_tool_destroy_dev(void) +{ + device_destroy(g_nictool_class, g_dev_id); + cdev_del(&g_nictool_cdev); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + tool_info("Success to unregister sssnic_nictool_dev to system\n"); +} + +int sss_tool_init(void *hwdev, void *chip_node) +{ + struct sss_card_node *card_node = (struct sss_card_node *)chip_node; + int ret; + + ret = sss_tool_add_func_to_card_node(hwdev, card_node); + if (ret) { + tool_err("Fail to add func to card node\n"); + return ret; + } + + if (g_nictool_ref_cnt++) { + tool_info("sssnic_nictool_dev has already create\n"); + return 0; + } + + ret = sss_tool_create_dev(); + if (ret) { + tool_err("Fail to create sssnic_nictool_dev\n"); + goto out; + } + + return 0; + +out: + g_nictool_ref_cnt--; + sss_tool_del_func_in_card_node(hwdev, card_node); + + return ret; +} + +void sss_tool_uninit(void *hwdev, void *chip_node) +{ + struct sss_card_node *chip_info = (struct sss_card_node *)chip_node; + + sss_tool_del_func_in_card_node(hwdev, chip_info); + + if (g_nictool_ref_cnt == 0) + return; + + if (--g_nictool_ref_cnt) + return; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) { + tool_err("Fail to uninit sssnictool, tool class is NULL.\n"); + return; + } + + sss_tool_destroy_dev(); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c new file mode 100644 index 00000000000000..b1105e518484a4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_tool.h" +#include "sss_csr.h" +#include "sss_adapter_mgmt.h" +#include "sss_mgmt_info.h" +#include "sss_pci_global.h" +#include "sss_hwif_api.h" + +typedef int (*sss_tool_hw_cmd_func)(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct sss_tool_hw_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_hw_cmd_func func; +}; + +static int sss_tool_get_func_type(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, 
sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = (u16)sss_get_func_type(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +static int sss_tool_get_func_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = (u16)sss_get_func_id(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +static int sss_tool_get_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + struct sss_hw_stats *stats = buf_out; + struct sss_hw_stats *tmp = stats; + + if (!hwdev) + return -EINVAL; + + if (*out_size != sizeof(struct sss_hw_stats) || !stats) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + memcpy(stats, &hwdev->hw_stats, sizeof(struct sss_hw_stats)); + + atomic_set(&tmp->nic_ucode_event_stats[SSS_CHN_BUSY], + atomic_read(&node->channel_timeout_cnt)); + + return 0; +} + +static int sss_tool_clear_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + + memset((void *)&hwdev->hw_stats, 0, sizeof(struct sss_hw_stats)); + memset((void *)hwdev->chip_fault_stats, 0, SSS_TOOL_CHIP_FAULT_SIZE); + + if (SSS_SUPPORT_CHANNEL_DETECT(hwdev) && atomic_read(&node->channel_timeout_cnt)) { + atomic_set(&node->channel_timeout_cnt, 0); + hwdev->aeq_busy_cnt = 0; +#if !defined(__UEFI__) && !defined(VMWARE) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSSNIC_CHANNEL_DETECT_PERIOD)); +#endif + } + + if (*out_size != sizeof(struct sss_hw_stats)) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + return 0; +} + +static int sss_tool_get_self_test_result(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 val; + + if (*out_size != sizeof(u32) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + val = sss_chip_read_reg(SSS_TO_HWIF(hal_dev->hwdev), SSS_MGMT_HEALTH_STATUS_ADDR); + *(u32 *)buf_out = val; + + return 0; +} + +static void sss_tool_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) +{ + u32 size; + + if (offset >= SSS_TOOL_CHIP_FAULT_SIZE) { + tool_err("Invalid chip offset value: %d\n", offset); + return; + } + + size = min(SSS_TOOL_DRV_BUF_SIZE_MAX, SSS_TOOL_CHIP_FAULT_SIZE - (int)offset); + memcpy(chip_fault_stats, ((struct sss_hwdev *)hwdev)->chip_fault_stats + + offset, size); +} + +static int sss_tool_get_chip_faults_stats(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 offset = 0; + struct sss_tool_cmd_chip_fault_stats *info = NULL; + + if (!buf_in || !buf_out || *out_size != sizeof(*info) || + in_size != sizeof(*info)) { + tool_err("Invalid out_size from user: %d, expect: %lu\n", *out_size, sizeof(*info)); + return -EFAULT; + } + info = (struct sss_tool_cmd_chip_fault_stats *)buf_in; + offset = info->offset; + + info = (struct sss_tool_cmd_chip_fault_stats *)buf_out; + 
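+	/* request and reply share one layout: the offset comes from buf_in, the stats bytes are copied into buf_out */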
sss_tool_get_chip_fault_stats(hal_dev->hwdev, + info->chip_fault_stats, offset); + + return 0; +} + +static int sss_tool_get_single_card_info(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_out || *out_size != sizeof(struct sss_tool_card_info)) { + tool_err("Invalid buf out is NULL, or out_size != %lu\n", + sizeof(struct sss_tool_card_info)); + return -EINVAL; + } + + sss_get_card_info(hal_dev->hwdev, buf_out); + + return 0; +} + +static int sss_tool_is_driver_in_vm(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_out || (*out_size != sizeof(u8))) { + tool_err("Invalid parameter, buf_out is NULL or out_size != %lu\n", sizeof(u8)); + return -EINVAL; + } + + *((u8 *)buf_out) = sss_is_in_host() ? 0 : 1; + + return 0; +} + +static int sss_tool_get_all_chip_id_cmd(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(struct sss_card_id) || !buf_out) { + tool_err("Invalid parameter: out_size %u, expect %lu\n", + *out_size, sizeof(struct sss_card_id)); + return -EFAULT; + } + + sss_get_all_chip_id(buf_out); + + return 0; +} + +static int sss_tool_get_card_id(char *dev_name, int *id) +{ + int ret; + + ret = sscanf(dev_name, SSS_CHIP_NAME "%d", id); + if (ret < 0) { + tool_err("Fail to get card id\n"); + return ret; + } + + if (*id >= SSS_TOOL_CARD_MAX || *id < 0) { + tool_err("Invalid chip id %d, out of range: [0-%d]\n", *id, SSS_TOOL_CARD_MAX - 1); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_get_pf_dev_info_param(struct sss_tool_pf_dev_info *dev_info, int card_id, + void **func_array) +{ + u32 func_id; + void *hwdev = NULL; + struct pci_dev *pdev = NULL; + + for (func_id = 0; func_id < SSS_TOOL_PF_DEV_MAX; func_id++) { + hwdev = (void *)func_array[func_id]; + + dev_info[func_id].phy_addr = g_card_pa[card_id]; + + if (!hwdev) { + dev_info[func_id].bar0_size = 0; + dev_info[func_id].bus = 0; + dev_info[func_id].slot = 0; + dev_info[func_id].func = 0; + } else { + pdev = (struct pci_dev *)sss_get_pcidev_hdl(hwdev); + dev_info[func_id].bar0_size = pci_resource_len(pdev, 0); + dev_info[func_id].bus = pdev->bus->number; + dev_info[func_id].slot = PCI_SLOT(pdev->devfn); + dev_info[func_id].func = PCI_FUNC(pdev->devfn); + } + } +} + +static int sss_tool_get_card_adm_mem(int card_id) +{ + int i; + unsigned char *card_va = NULL; + + g_card_id = card_id; + if (!g_card_va[card_id]) { + g_card_va[card_id] = + (void *)__get_free_pages(GFP_KERNEL, SSS_TOOL_PAGE_ORDER); + if (!g_card_va[card_id]) { + tool_err("Fail to alloc adm memory for card %d!\n", card_id); + return -EFAULT; + } + + memset(g_card_va[card_id], 0, PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)); + + g_card_pa[card_id] = virt_to_phys(g_card_va[card_id]); + if (!g_card_pa[card_id]) { + tool_err("Invalid phy addr for card %d is 0\n", card_id); + free_pages((unsigned long)g_card_va[card_id], SSS_TOOL_PAGE_ORDER); + g_card_va[card_id] = NULL; + return -EFAULT; + } + + card_va = g_card_va[card_id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(card_va)); + card_va += PAGE_SIZE; + } + } + + return 0; +} + +static int sss_tool_get_pf_dev_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_tool_pf_dev_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + if (!buf_out || *out_size != sizeof(struct 
sss_tool_pf_dev_info) * SSS_TOOL_PF_DEV_MAX) { + tool_err("Invalid param: out_size %u, expect %lu\n", + *out_size, sizeof(info) * SSS_TOOL_PF_DEV_MAX); + return -EFAULT; + } + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_get_pf_dev_info_param(info, id, node->func_handle_array); + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + return 0; +} + +long sss_tool_free_card_mem(int id) +{ + unsigned char *va = NULL; + int i; + + if (!g_card_va[id]) + return 0; + + va = g_card_va[id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(va)); + va += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_va[id], SSS_TOOL_PAGE_ORDER); + g_card_va[id] = NULL; + g_card_pa[id] = 0; + + return 0; +} + +static int sss_tool_free_all_card_mem(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_free_card_mem(id); + + return 0; +} + +static int sss_tool_check_card_info_param(char *dev_name, const void *buf_out, u32 out_size) +{ + int ret; + + if (!buf_out || out_size != sizeof(struct sss_card_func_info)) { + tool_err("Invalid out_size %u, expect %lu\n", + out_size, sizeof(struct sss_card_func_info)); + return -EINVAL; + } + + ret = memcmp(dev_name, SSS_CHIP_NAME, strlen(SSS_CHIP_NAME)); + if (ret) { + tool_err("Invalid chip name %s\n", dev_name); + return ret; + } + + return 0; +} + +static int sss_tool_get_card_func_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + int id = 0; + struct sss_card_func_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_check_card_info_param(node->chip_name, buf_out, *out_size); + if (ret) + return ret; + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_get_card_func_info(node->chip_name, info); + + if (!info->pf_num) { + tool_err("Fail to get card func info, chip name %s\n", node->chip_name); + return -EFAULT; + } + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + info->usr_adm_pa = g_card_pa[id]; + + return 0; +} + +static int sss_tool_get_pf_cap_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + struct sss_svc_cap_info *in_info = (struct sss_svc_cap_info *)buf_in; + struct sss_svc_cap_info *out_info = (struct sss_svc_cap_info *)buf_out; + + if (*out_size != sizeof(struct sss_svc_cap_info) || + in_size != sizeof(struct sss_svc_cap_info) || + !buf_in || !buf_out) { + tool_err("Invalid out_size %u, in_size: %u, expect %lu\n", + *out_size, in_size, sizeof(struct sss_svc_cap_info)); + return -EINVAL; + } + + if (in_info->func_id >= SSS_MAX_FUNC) { + tool_err("Invalid func id: %u, max_num: %u\n", + in_info->func_id, SSS_MAX_FUNC); + return -EINVAL; + } + + sss_hold_chip_node(); + hwdev = (struct sss_hwdev *)(node->func_handle_array)[in_info->func_id]; + if (!hwdev) { + sss_put_chip_node(); + return -EINVAL; + } + + memcpy(&out_info->cap, SSS_TO_SVC_CAP(hwdev), sizeof(struct sss_service_cap)); + 
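+	/* capability snapshot copied out; release the chip node reference taken above */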
sss_put_chip_node(); + + return 0; +} + +static int sss_tool_get_hw_drv_version(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + struct sss_tool_drv_version_info *info = buf_out; + + if (!buf_out || *out_size != sizeof(*info)) { + tool_err("Invalid param, buf_out is NULL or out_size:%u, expect: %lu\n", + *out_size, sizeof(*info)); + return -EINVAL; + } + + ret = snprintf(info->ver, sizeof(info->ver), "%s %s", SSS_DRV_VERSION, + __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static int sss_tool_get_pf_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_tool_pf_info *info = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + u32 port_id; + int ret; + + if (!node) + return -ENODEV; + + if (!buf_out || (*out_size != sizeof(*info)) || !buf_in || in_size != sizeof(port_id)) { + tool_err("Invalid out_size from user: %u, expect: %lu, in_size:%u\n", + *out_size, sizeof(*info), in_size); + return -EINVAL; + } + + port_id = *((u32 *)buf_in); + info = (struct sss_tool_pf_info *)buf_out; + + ret = sss_get_pf_id(node, port_id, &info->pf_id, &info->valid); + if (ret != 0) + return ret; + + *out_size = sizeof(*info); + + return 0; +} + +struct sss_tool_hw_cmd_handle g_hw_cmd_handle[] = { + {SSS_TOOL_FUNC_TYPE, sss_tool_get_func_type}, + {SSS_TOOL_GET_FUNC_IDX, sss_tool_get_func_id}, + {SSS_TOOL_GET_CHIP_INFO, sss_tool_get_card_func_info}, + {SSS_TOOL_GET_DRV_VERSION, sss_tool_get_hw_drv_version}, + {SSS_TOOL_GET_PF_ID, sss_tool_get_pf_id}, + {SSS_TOOL_GET_FUNC_CAP, sss_tool_get_pf_cap_info}, + {SSS_TOOL_GET_SELF_TEST_RES, sss_tool_get_self_test_result}, + {SSS_TOOL_GET_CHIP_ID, sss_tool_get_all_chip_id_cmd}, + {SSS_TOOL_GET_PF_DEV_INFO, sss_tool_get_pf_dev_info}, + {SSS_TOOL_IS_DRV_IN_VM, sss_tool_is_driver_in_vm}, + {SSS_TOOL_CMD_FREE_MEM, sss_tool_free_all_card_mem}, + {SSS_TOOL_GET_CHIP_FAULT_STATS, (sss_tool_hw_cmd_func)sss_tool_get_chip_faults_stats}, + {SSS_TOOL_GET_SINGLE_CARD_INFO, (sss_tool_hw_cmd_func)sss_tool_get_single_card_info}, + {SSS_TOOL_GET_HW_STATS, (sss_tool_hw_cmd_func)sss_tool_get_hw_driver_stats}, + {SSS_TOOL_CLEAR_HW_STATS, sss_tool_clear_hw_driver_stats}, +}; + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int id; + int ret = 0; + int cmd_num = ARRAY_LEN(g_hw_cmd_handle); + enum sss_tool_driver_cmd_type cmd = + (enum sss_tool_driver_cmd_type)(tool_msg->msg_formate); + + for (id = 0; id < cmd_num; id++) { + if (cmd == g_hw_cmd_handle[id].cmd_type) { + ret = g_hw_cmd_handle[id].func + (hal_dev, buf_in, in_size, buf_out, out_size); + break; + } + } + + if (id == cmd_num) { + tool_err("Fail to send msg to hw, cmd: %d out of range\n", cmd); + return -EINVAL; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h new file mode 100644 index 00000000000000..d02af2fe52c1c4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SDK_H +#define SSS_TOOL_SDK_H + +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_hw.h" + +long sss_tool_free_card_mem(int id); + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void 
*buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c new file mode 100644 index 00000000000000..549eb928f5c40d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_hwdev.h" +#include "sss_hwif_adm.h" +#include "sss_tool_comm.h" +#include "sss_tool_sm.h" + +#define SSS_TOOL_CHIP_ACK 1 +#define SSS_TOOL_CHIP_NOACK 0 + +#define SSS_TOOL_SM_CHIP_OP_READ 0x2 +#define SSS_TOOL_SM_CHIP_OP_READ_CLEAR 0x6 + +#define SSS_TOOL_BIT_32 32 + +struct sss_tool_sm_in { + int node; + int id; + int instance; +}; + +struct sss_tool_sm_out { + u64 val1; + u64 val2; +}; + +union sss_tool_sm_chip_request_head { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct sss_tool_sm_chip_request { + u32 extra; + union sss_tool_sm_chip_request_head head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +/* counter read response union */ +union sss_tool_chip_rd_response { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; +}; + +typedef int (*sss_tool_sm_handler_func)(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf); + +struct sss_tool_sm_handler { + enum sss_tool_sm_cmd_type msg_name; + sss_tool_sm_handler_func sm_func; +}; + +static void sss_tool_sm_read_msg_create(struct sss_tool_sm_chip_request *request, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + request->head.value = 0; + request->head.bs.op_id = op_id; + request->head.bs.ack = ack; + request->head.bs.instance = instance_id; + request->head.value = HTONL(request->head.value); + + request->initial = init_val; + request->ctr_id = ctr_id; + request->ctr_id = HTONL(request->ctr_id); +} + +static void sss_tool_sm_node_htonl(u32 *node, u32 len) +{ + u32 *new_node = node; + u32 i; + + for (i = 0; i < len; i++) { + *new_node = HTONL(*new_node); + new_node++; + } +} + +static int sss_tool_sm_adm_msg_rd(void *hwdev, u32 id, u8 instance, + u8 node, union sss_tool_chip_rd_response *rsp, u8 opcode) +{ + struct sss_tool_sm_chip_request req = {0}; + int ret; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) { + tool_err("Fail to read sm data, device not support adm msg\n"); + return -EPERM; + } + + sss_tool_sm_read_msg_create(&req, instance, opcode, + SSS_TOOL_CHIP_ACK, id, 0); + + ret = sss_adm_msg_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)rsp, + (unsigned short)sizeof(*rsp)); + if (ret) { + tool_err("Fail to read sm data from adm msg, err(%d)\n", ret); + return ret; + } + + sss_tool_sm_node_htonl((u32 *)rsp, sizeof(*rsp) / sizeof(u32)); + + return 0; +} + +static int sss_tool_sm_msg_rd16(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u16 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, 
SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss16_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss32_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss32_rsp.value1; + +out: + out_buf->val1 = val1; + return ret; +} + +static int sss_tool_sm_msg_rd128(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + sss_tool_sm_node_htonl((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd128_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd64_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail 
to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +const struct sss_tool_sm_handler g_sm_cmd_handle[] = { + {SSS_TOOL_SM_CMD_RD16, sss_tool_sm_msg_rd16}, + {SSS_TOOL_SM_CMD_RD32, sss_tool_sm_msg_rd32}, + {SSS_TOOL_SM_CMD_RD32_CLEAR, sss_tool_sm_msg_rd32_clear}, + {SSS_TOOL_SM_CMD_RD64, sss_tool_sm_msg_rd64}, + {SSS_TOOL_SM_CMD_RD64_CLEAR, sss_tool_sm_msg_rd64_clear}, + {SSS_TOOL_SM_CMD_RD64_PAIR, sss_tool_sm_msg_rd128}, + {SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, sss_tool_sm_msg_rd128_clear} +}; + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int index; + int ret = 0; + int cmd_num = ARRAY_LEN(g_sm_cmd_handle); + u32 msg_formate = msg->msg_formate; + struct sss_tool_sm_in *sm_in = in_buf; + struct sss_tool_sm_out *sm_out = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid in_buf or out buf param\n"); + return -EINVAL; + } + + if (in_len != sizeof(*sm_in) || *out_len != sizeof(*sm_out)) { + tool_err("Invalid out buf size :%u, in buf size: %u\n", + *out_len, in_len); + return -EINVAL; + } + + for (index = 0; index < cmd_num; index++) { + if (msg_formate != g_sm_cmd_handle[index].msg_name) + continue; + + ret = g_sm_cmd_handle[index].sm_func(hal_dev->hwdev, (u32)sm_in->id, + (u8)sm_in->instance, (u8)sm_in->node, sm_out); + break; + } + + if (index == cmd_num) { + tool_err("Fail to execute msg %d,could not find callback\n", msg_formate); + return -EINVAL; + } + + if (ret != 0) + tool_err("Fail to get sm information, id:%u, instance:%u, node:%u, msg:%d\n", + sm_in->id, sm_in->instance, sm_in->node, msg_formate); + + *out_len = sizeof(*sm_out); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h new file mode 100644 index 00000000000000..7c32ebdf2f4d31 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SM_H +#define SSS_TOOL_SM_H +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0xff000000) >> 24)) +#endif + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h new file mode 100644 index 00000000000000..4a9dd7eee1ad6b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_AEQ_H +#define SSS_HW_AEQ_H + +enum sss_aeq_hw_event { + SSS_HW_FROM_INT = 0, + SSS_MBX_FROM_FUNC = 1, + SSS_MSG_FROM_MGMT = 2, + SSS_ADM_RSP = 3, + SSS_ADM_MSG_STS = 4, + SSS_MBX_SEND_RSLT = 5, + SSS_AEQ_EVENT_MAX +}; + +enum sss_aeq_sw_event { + SSS_STL_EVENT = 0, + SSS_STF_EVENT = 1, + SSS_AEQ_SW_EVENT_MAX +}; + +enum sss_ucode_event_type { + SSS_INTERN_ERR = 0x0, + SSS_CHN_BUSY = 0x7, + SSS_ERR_MAX = 0x8, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h new file mode 100644 index 00000000000000..7626ec44b968e8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_CEQ_H +#define SSS_HW_CEQ_H + +enum sss_ceq_event { + SSS_NIC_CTRLQ = 0x3, + SSS_NIC_SQ, + SSS_NIC_RQ, + SSS_CEQ_EVENT_MAX, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h new file mode 100644 index 00000000000000..aef21aa49b2882 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_COMMON_H +#define SSS_HW_COMMON_H + +#include + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +enum sss_func_type { + SSS_FUNC_TYPE_PF, + SSS_FUNC_TYPE_VF, + SSS_FUNC_TYPE_PPF, + SSS_FUNC_TYPE_UNKNOWN, +}; + +struct sss_dma_addr_align { + u32 real_size; + + void *origin_vaddr; + dma_addr_t origin_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +enum sss_process_ret { + SSS_PROCESS_OK = 0, + SSS_PROCESS_DOING = 1, + SSS_PROCESS_ERR = 2, +}; + +struct sss_sge { + u32 high_addr; + u32 low_addr; + u32 len; +}; + +typedef enum sss_process_ret(*sss_wait_handler_t)(void *priv_data); + +/* * + * sssnic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +static inline void sss_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/* * + * sss_cpu_to_be32 - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +static inline void sss_be32_to_cpu(void *data, int len) +{ + int i; + int data_len; + u32 *array = data; + + if (!data) + return; + + data_len = len / sizeof(u32); + + for (i = 0; i < data_len; i++) { + *array = be32_to_cpu(*array); + array++; + } +} + +/* * + * sss_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +static inline void sss_set_sge(struct sss_sge *sge, dma_addr_t addr, int len) +{ + sge->high_addr = upper_32_bits(addr); + sge->low_addr = lower_32_bits(addr); + sge->len = len; +} + +#define sss_hw_be32(val) (val) +#define sss_hw_cpu32(val) (val) +#define sss_hw_cpu16(val) (val) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h new file mode 100644 index 00000000000000..71921daa24526b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_CTRLQ_H +#define SSS_HW_CTRLQ_H + +#include +#include + +struct sss_ctrl_msg_buf { + 
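+	/* message payload: CPU address plus the DMA address used by the hardware */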
void *buf; + dma_addr_t dma_addr; + u16 size; + + /* Usage count, USERS DO NOT USE */ + atomic_t ref_cnt; +}; + +/** + * @brief sss_alloc_ctrlq_msg_buf - alloc ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @retval non-zero: success + * @retval null: failure + **/ +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev); + +/** + * @brief sss_free_ctrlq_msg_buf - free ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @param msg_buf: buffer to free + **/ +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf); + +/** + * @brief sss_ctrlq_direct_reply - ctrlq direct message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_param: message out + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sss_ctrlq_detail_reply - ctrlq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_buf: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h new file mode 100644 index 00000000000000..362ba20656ce30 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EVENT_H +#define SSS_HW_EVENT_H + +#include + +#include "sss_hw_svc_cap.h" + +enum sss_fault_source_type { + /* same as SSS_FAULT_TYPE_CHIP */ + SSS_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as SSS_FAULT_TYPE_NPU */ + SSS_FAULT_SRC_HW_MGMT_NPU, + /* same as SSS_FAULT_TYPE_MEM_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_MEM_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + SSS_FAULT_SRC_SW_MGMT_NPU, + SSS_FAULT_SRC_MGMT_WATCHDOG, + SSS_FAULT_SRC_MGMT_RESET = 8, + SSS_FAULT_SRC_HW_PHY_FAULT, + SSS_FAULT_SRC_TX_PAUSE_EXCP, + SSS_FAULT_SRC_PCIE_LINK_DOWN = 20, + SSS_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, + SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_SRC_TYPE_MAX, +}; + +enum sss_comm_event_type { + SSS_EVENT_PCIE_LINK_DOWN, + SSS_EVENT_HEART_LOST, + SSS_EVENT_FAULT, + SSS_EVENT_SRIOV_STATE_CHANGE, + SSS_EVENT_CARD_REMOVE, + SSS_EVENT_MGMT_WATCHDOG, + SSS_EVENT_MAX +}; + +enum sss_event_service_type { + SSS_EVENT_SRV_COMM, + SSS_SERVICE_EVENT_BASE, + SSS_EVENT_SRV_NIC = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_NIC, + SSS_EVENT_SRV_MIGRATE = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_MIGRATE, +}; + +enum sss_fault_err_level { + SSS_FAULT_LEVEL_FATAL, + SSS_FAULT_LEVEL_SERIOUS_RESET, + SSS_FAULT_LEVEL_HOST, + SSS_FAULT_LEVEL_SERIOUS_FLR, + SSS_FAULT_LEVEL_GENERAL, + SSS_FAULT_LEVEL_SUGGESTION, + 
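+	/* keep last: used as the number of fault levels */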
SSS_FAULT_LEVEL_MAX, +}; + +enum sss_fault_type { + SSS_FAULT_TYPE_CHIP, + SSS_FAULT_TYPE_NPU, + SSS_FAULT_TYPE_MEM_RD_TIMEOUT, + SSS_FAULT_TYPE_MEM_WR_TIMEOUT, + SSS_FAULT_TYPE_REG_RD_TIMEOUT, + SSS_FAULT_TYPE_REG_WR_TIMEOUT, + SSS_FAULT_TYPE_PHY_FAULT, + SSS_FAULT_TYPE_TSENSOR_FAULT, + SSS_FAULT_TYPE_MAX, +}; + +#define SSS_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type)) + +#define SSS_MGMT_CMD_UNSUPPORTED 0xFF + +union sss_fault_hw_mgmt { + u32 val[4]; + /* valid only type == SSS_FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum sss_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == SSS_FAULT_LEVEL_SERIOUS_FLR */ + u8 rsvd1; + u8 host_id; + u16 func_id; + } chip; + + /* valid only if type == SSS_FAULT_TYPE_NPU */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == SSS_FAULT_TYPE_MEM_RD_TIMEOUT || + * SSS_FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_id; + } mem_timeout; + + /* valid only if type == SSS_FAULT_TYPE_REG_RD_TIMEOUT || + * SSS_FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct sss_fault_event { + u8 type; /* enum sss_fault_type */ + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd[2]; + union sss_fault_hw_mgmt info; +}; + +struct sss_cmd_fault_event { + u8 status; + u8 ver; + u8 rsvd[6]; + struct sss_fault_event fault_event; +}; + +struct sss_event_info { + u16 service; /* enum sss_event_service_type */ + u16 type; /* enum sss_comm_event_type */ + u8 event_data[104]; +}; + +typedef void (*sss_event_handler_t)(void *handle, struct sss_event_info *event); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h new file mode 100644 index 00000000000000..b14290fb2f2727 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EXPORT_H +#define SSS_HW_EXPORT_H + +#include + +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_event.h" + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel); + +/* * + * @brief sss_chip_clear_msix_resend_bit - clear msix resend bit + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param clear_en: 1-clear + */ +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en); + +/** + * @brief sss_chip_reset_function - reset func + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param flag: reset flag + * @param channel: channel id + */ +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel); + +/** + * @brief sss_chip_set_root_ctx - set root context + * @param hwdev: device pointer to hwdev + * @param rq_depth: rq depth + * @param sq_depth: sq depth + * @param rx_size: rx buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 
channel); + +/** + * @brief sss_chip_clean_root_ctx - clean root context + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_clean_root_ctx(void *hwdev, u16 channel); + +/* * + * @brief sss_get_mgmt_version - get management cpu version + * @param hwdev: device pointer to hwdev + * @param buf: output management version + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel); + +/** + * @brief sss_chip_set_func_used_state - set function service used state + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param state: function used state + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability); + +/* * + * @brief sss_support_nic - function support nic + * @param hwdev: device pointer to hwdev + * @param cap: nic service capbility + * @retval true: function support nic + * @retval false: function not support nic + */ +bool sss_support_nic(void *hwdev); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap); + +/* * + * @brief sss_get_max_sq_num - get max queue number + * @param hwdev: device pointer to hwdev + * @retval non-zero: max queue number + * @retval zero: failure + */ +u16 sss_get_max_sq_num(void *hwdev); + +/* * + * @brief sss_get_phy_port_id - get physical port id + * @param hwdev: device pointer to hwdev + * @retval physical port id + */ +u8 sss_get_phy_port_id(void *hwdev); /* Obtain sss_service_cap.port_id */ + +/* * + * @brief sss_get_max_vf_num - get vf number + * @param hwdev: device pointer to hwdev + * @retval non-zero: vf number + * @retval zero: failure + */ +u16 sss_get_max_vf_num(void *hwdev); /* Obtain sss_service_cap.max_vf */ + +/* * + * @brief sss_get_cos_valid_bitmap - get cos valid bitmap + * @param hwdev: device pointer to hwdev + * @retval non-zero: valid cos bit map + * @retval zero: failure + */ +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap); + +/* * + * @brief sss_alloc_irq - alloc irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param alloc_array: alloc irq info + * @param alloc_num: alloc number + * @retval zero: failure + * @retval non-zero: success + */ +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, u16 alloc_num); + +/* * + * @brief sss_free_irq - free irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param irq_id: irq id + */ +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id); + +/* * + * @brief sss_register_dev_event - register hardware event + * @param hwdev: device pointer to hwdev + * @param data: private data will be used by the callback + * @param callback: callback function + */ +void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback); + +/* * + * @brief sss_unregister_dev_event - unregister hardware event + * @param dev: device pointer to hwdev + */ +void sss_unregister_dev_event(void *dev); + +/* * + * @brief sss_get_dev_present_flag - get chip present flag + * @param hwdev: device pointer to hwdev + * @retval 1: chip is present + * @retval 0: chip is absent + 
*/ +int sss_get_dev_present_flag(const void *hwdev); + +/* * + * @brief sss_get_max_pf_num - get global max pf number + */ +u8 sss_get_max_pf_num(void *hwdev); + +u16 sss_nic_intr_num(void *hwdev); + +/* * + * @brief sss_get_chip_present_state - get card present state + * @param hwdev: device pointer to hwdev + * @param present_state: return card present state + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_chip_present_state(void *hwdev, bool *present_state); + +/** + * @brief sss_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev + * @param src: fault event source, reference to enum sss_fault_source_type + * @param level: fault level, reference to enum sss_fault_err_level + */ +void sss_fault_event_report(void *hwdev, u16 src, u16 level); + +/** + * @brief sss_register_service_adapter - register service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param service_adapter: service adapter + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type, + void *service_adapter); + +/** + * @brief sss_unregister_service_adapter - unregister service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + **/ +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type); + +/** + * @brief sss_get_service_adapter - get service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @retval non-zero: success + * @retval null: failure + **/ +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type); + +/** + * @brief sss_do_event_callback - evnet callback to notify service driver + * @param hwdev: device pointer to hwdev + * @param event: event info to service driver + */ +void sss_do_event_callback(void *hwdev, struct sss_event_info *event); + +/** + * @brief sss_update_link_stats - link event stats + * @param hwdev: device pointer to hwdev + * @param link_state: link status + */ +void sss_update_link_stats(void *hwdev, bool link_state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h new file mode 100644 index 00000000000000..60354bcf0efac1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_IRQ_H +#define SSS_HW_IRQ_H + +#include + +enum sss_msix_auto_mask { + SSS_CLR_MSIX_AUTO_MASK, + SSS_SET_MSIX_AUTO_MASK, +}; + +enum sss_msix_state { + SSS_MSIX_ENABLE, + SSS_MSIX_DISABLE, +}; + +struct sss_irq_desc { + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd; + u32 irq_id; /* OS IRQ id */ +}; + +struct sss_irq_cfg { + u32 lli_set; + u32 coalesc_intr_set; + u16 msix_id; + u8 lli_credit; + u8 lli_timer; + u8 pending; + u8 coalesc_timer; + u8 resend_timer; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h new file mode 100644 index 00000000000000..33b5338a3ed79b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_H +#define SSS_HW_MBX_H + +#include + +/* between Driver to MPU */ +enum sss_mgmt_cmd { + /* flr */ + 
SSS_COMM_MGMT_CMD_FUNC_RESET = 0, + SSS_COMM_MGMT_CMD_FEATURE_NEGO, + SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + SSS_COMM_MGMT_CMD_START_FLUSH, + SSS_COMM_MGMT_CMD_SET_FUNC_FLR, + SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + SSS_COMM_MGMT_CMD_SET_PPF_FLR_TYPE, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + + /* msi-x */ + SSS_COMM_MGMT_CMD_CFG_MSIX_NUM = 10, + + /* init cfg */ + SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT = 20, + SSS_COMM_MGMT_CMD_SET_VAT, + SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_DMA_ATTR, + + /* infra */ + SSS_COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, + SSS_COMM_MGMT_CMD_SET_MQM_CFG_INFO, + SSS_COMM_MGMT_CMD_SET_MQM_SRCH_GPA, + SSS_COMM_MGMT_CMD_SET_PPF_TMR, + SSS_COMM_MGMT_CMD_SET_PPF_HT_GPA, + SSS_COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + SSS_COMM_MGMT_CMD_SET_MBX_CRDT, + SSS_COMM_MGMT_CMD_CFG_TEMPLATE, + SSS_COMM_MGMT_CMD_SET_MQM_LIMIT, + + /* get chip info */ + SSS_COMM_MGMT_CMD_GET_FW_VERSION = 60, + SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + SSS_COMM_MGMT_CMD_SYNC_TIME, + SSS_COMM_MGMT_CMD_GET_HW_PF_INFOS, + SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_SML_TABLE_INFO, + + /* update firmware */ + SSS_COMM_MGMT_CMD_UPDATE_FW = 80, + SSS_COMM_MGMT_CMD_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, + SSS_COMM_MGMT_CMD_SWITCH_CFG, + SSS_COMM_MGMT_CMD_CHECK_FLASH, + SSS_COMM_MGMT_CMD_CHECK_FLASH_RW, + SSS_COMM_MGMT_CMD_RESOURCE_CFG, + SSS_COMM_MGMT_CMD_UPDATE_BIOS, /* merge to SSS_COMM_MGMT_CMD_UPDATE_FW */ + SSS_COMM_MGMT_CMD_MPU_GIT_CODE, + + /* chip reset */ + SSS_COMM_MGMT_CMD_FAULT_REPORT = 100, + SSS_COMM_MGMT_CMD_WATCHDOG_INFO, + SSS_COMM_MGMT_CMD_MGMT_RESET, + SSS_COMM_MGMT_CMD_FFM_SET, + + /* chip info/log */ + SSS_COMM_MGMT_CMD_GET_LOG = 120, + SSS_COMM_MGMT_CMD_TEMP_OP, + SSS_COMM_MGMT_CMD_EN_AUTO_RST_CHIP, + SSS_COMM_MGMT_CMD_CFG_REG, + SSS_COMM_MGMT_CMD_GET_CHIP_ID, + SSS_COMM_MGMT_CMD_SYSINFO_DFX, + SSS_COMM_MGMT_CMD_PCIE_DFX_NTC, + SSS_COMM_MGMT_CMD_DICT_LOG_STATUS, /* LOG STATUS 127 */ + SSS_COMM_MGMT_CMD_MSIX_INFO, + SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + + /* DFT mode */ + SSS_COMM_MGMT_CMD_GET_DIE_ID = 200, + SSS_COMM_MGMT_CMD_GET_EFUSE_TEST, + SSS_COMM_MGMT_CMD_EFUSE_INFO_CFG, + SSS_COMM_MGMT_CMD_GPIO_CTL, + SSS_COMM_MGMT_CMD_HI30_SERLOOP_START, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_SERLOOP_STOP, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_ECC_TEST, + SSS_COMM_MGMT_CMD_FUNC_BIST_TEST, + SSS_COMM_MGMT_CMD_VPD_SET, + SSS_COMM_MGMT_CMD_VPD_GET, + + SSS_COMM_MGMT_CMD_ERASE_FLASH, + SSS_COMM_MGMT_CMD_QUERY_FW_INFO, + SSS_COMM_MGMT_CMD_GET_CFG_INFO, + SSS_COMM_MGMT_CMD_GET_UART_LOG, + SSS_COMM_MGMT_CMD_SET_UART_CMD, + SSS_COMM_MGMT_CMD_SPI_TEST, + + /* ALL reg read/write merge to SSS_COMM_MGMT_CMD_CFG_REG */ + SSS_COMM_MGMT_CMD_UP_REG_GET, + SSS_COMM_MGMT_CMD_UP_REG_SET, + SSS_COMM_MGMT_CMD_REG_READ, + SSS_COMM_MGMT_CMD_REG_WRITE, + SSS_COMM_MGMT_CMD_MAG_REG_WRITE, + SSS_COMM_MGMT_CMD_ANLT_REG_WRITE, + + SSS_COMM_MGMT_CMD_HEART_EVENT, + SSS_COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, + SSS_COMM_MGMT_CMD_LASTWORD_GET, /* merge to SSS_COMM_MGMT_CMD_GET_LOG */ + SSS_COMM_MGMT_CMD_READ_BIN_DATA, + SSS_COMM_MGMT_CMD_WWPN_GET, + SSS_COMM_MGMT_CMD_WWPN_SET, + + SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP, + + SSS_COMM_MGMT_CMD_SET_MAC, + + /* MPU patch cmd */ + SSS_COMM_MGMT_CMD_LOAD_PATCH, + 
SSS_COMM_MGMT_CMD_REMOVE_PATCH, + SSS_COMM_MGMT_CMD_PATCH_ACTIVE, + SSS_COMM_MGMT_CMD_PATCH_DEACTIVE, + SSS_COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, + /* container host process */ + SSS_COMM_MGMT_CMD_CONTAINER_HOST_PROC, + /* nsci counter */ + SSS_COMM_MGMT_CMD_NCSI_COUNTER_PROC, +}; + +enum sss_channel_type { + SSS_CHANNEL_DEFAULT, + SSS_CHANNEL_COMM, + SSS_CHANNEL_NIC, + SSS_CHANNEL_ROCE, + SSS_CHANNEL_TOE, + SSS_CHANNEL_FC, + SSS_CHANNEL_OVS, + SSS_CHANNEL_DSW, + SSS_CHANNEL_MIG, + SSS_CHANNEL_CRYPT, + SSS_CHANNEL_MAX = 32, +}; + +enum sss_mbx_errcode { + SSS_MBX_ERRCODE_NO_ERRORS = 0, + /* VF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, + /* PPF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, + /* PF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, + /* The mbx data size is set to all zero */ + SSS_MBX_ERRCODE_ZERO_DATA_SIZE = 0x400, + /* The sender function attribute has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, + /* The receiver function attr has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, +}; + +/* CTRLQ MODULE_TYPE */ +enum sss_mod_type { + SSS_MOD_TYPE_COMM = 0, /* HW communication module */ + SSS_MOD_TYPE_L2NIC = 1, /* L2NIC module */ + SSS_MOD_TYPE_ROCE = 2, + SSS_MOD_TYPE_PLOG = 3, + SSS_MOD_TYPE_TOE = 4, + SSS_MOD_TYPE_FLR = 5, + SSS_MOD_TYPE_RSVD1 = 6, + SSS_MOD_TYPE_CFGM = 7, /* Configuration module */ + SSS_MOD_TYPE_QMM = 8, + SSS_MOD_TYPE_RSVD2 = 9, + COMM_MOD_FC = 10, + SSS_MOD_TYPE_OVS = 11, + SSS_MOD_TYPE_DSW = 12, + SSS_MOD_TYPE_MIGRATE = 13, + SSS_MOD_TYPE_SSSLINK = 14, + SSS_MOD_TYPE_CRYPT = 15, /* secure crypto module */ + SSS_MOD_TYPE_VIO = 16, + SSS_MOD_TYPE_IMU = 17, + SSS_MOD_TYPE_DFT = 18, /* DFT */ + SSS_MOD_TYPE_HW_MAX = 19, /* hardware max module id */ + /* Software module id, for PF/VF and multi-host */ + SSS_MOD_TYPE_SW_FUNC = 20, + SSS_MOD_TYPE_MAX, +}; + +/* func reset flag */ +enum sss_func_reset_flag { + SSS_RESET_TYPE_FLUSH_BIT = 0, + SSS_RESET_TYPE_MQM, + SSS_RESET_TYPE_SMF, + SSS_RESET_TYPE_PF_BW_CFG, + + SSS_RESET_TYPE_COMM = 10, + SSS_RESET_TYPE_COMM_MGMT_CH, + SSS_RESET_TYPE_COMM_CMD_CH, + SSS_RESET_TYPE_NIC, + SSS_RESET_TYPE_OVS, + SSS_RESET_TYPE_VBS, + SSS_RESET_TYPE_ROCE, + SSS_RESET_TYPE_FC, + SSS_RESET_TYPE_TOE, + SSS_RESET_TYPE_IPSEC, + SSS_RESET_TYPE_MAX, +}; + +#define SSS_NIC_RESET BIT(SSS_RESET_TYPE_NIC) +#define SSS_OVS_RESET BIT(SSS_RESET_TYPE_OVS) +#define SSS_VBS_RESET BIT(SSS_RESET_TYPE_VBS) +#define SSS_ROCE_RESET BIT(SSS_RESET_TYPE_ROCE) +#define SSS_FC_RESET BIT(SSS_RESET_TYPE_FC) +#define SSS_TOE_RESET BIT(SSS_RESET_TYPE_TOE) +#define SSS_IPSEC_RESET BIT(SSS_RESET_TYPE_IPSEC) + +typedef int (*sss_vf_mbx_handler_t)(void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +typedef int (*sss_pf_mbx_handler_t)(void *pri_handle, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +typedef int (*sss_ppf_mbx_handler_t)(void *pri_handle, u16 pf_id, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +typedef int (*sss_pf_from_ppf_mbx_handler_t)(void *pri_handle, + u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief sss_register_pf_mbx_handler - pf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the 
callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb); + +/** + * @brief sss_register_vf_mbx_handler - vf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb); + +/** + * @brief sss_unregister_pf_mbx_handler - pf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_unregister_vf_mbx_handler - pf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_sync_send_mbx_msg - msg to management cpu + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +#define sss_sync_send_msg_ch(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, channel) + +#define sss_sync_send_msg(hwdev, cmd, buf_in, in_size, buf_out, out_size) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, SSS_CHANNEL_COMM) + +#define SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_ptr) \ + ((ret) != 0 || (out_len) == 0 || (cmd_ptr)->head.state != SSS_MGMT_CMD_SUCCESS) + +/** + * @brief sss_mbx_send_to_pf - vf mbx message to pf + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief sss_mbx_send_to_vf - mbx message to vf + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h new file mode 100644 index 00000000000000..2280b234e06039 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_MSG_H +#define SSS_HW_MBX_MSG_H + +#include + +#define SSS_MGMT_MSG_SET_CMD 1 +#define SSS_MGMT_MSG_GET_CMD 0 + +#define SSS_MGMT_CMD_SUCCESS 0 + +struct sss_mgmt_msg_head { + u8 state; + u8 version; + u8 rsvd0[6]; +}; + +struct sss_cmd_func_reset { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; + u64 reset_flag; +}; + +enum { + SSS_COMM_F_ADM = 1U << 0, + SSS_COMM_F_CLP = 1U << 1, + SSS_COMM_F_CHANNEL_DETECT = 1U << 2, + SSS_COMM_F_MBX_SEGMENT = 1U << 3, + SSS_COMM_F_CTRLQ_NUM = 1U << 4, + SSS_COMM_F_VIRTIO_VQ_SIZE = 1U << 5, +}; + +#define SSS_MAX_FEATURE_QWORD 4 +struct sss_cmd_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 feature[SSS_MAX_FEATURE_QWORD]; +}; + +struct sss_cmd_clear_doorbell { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_cmd_clear_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_comm_global_attr { + u8 max_host_num; + u8 max_pf_num; + u16 vf_id_start; + + u8 mgmt_host_node_id; /* for adm msg to mgmt cpu */ + u8 ctrlq_num; + u8 rsvd1[2]; + u32 rsvd2[8]; +}; + +struct sss_cmd_channel_detect { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; + u32 rsvd2[2]; +}; + +enum sss_svc_type { + SSS_SVC_TYPE_COM = 0, + SSS_SVC_TYPE_NIC, + SSS_SVC_TYPE_OVS, + SSS_SVC_TYPE_ROCE, + SSS_SVC_TYPE_TOE, + SSS_SVC_TYPE_IOE, + SSS_SVC_TYPE_FC, + SSS_SVC_TYPE_VBS, + SSS_SVC_TYPE_IPSEC, + SSS_SVC_TYPE_VIRTIO, + SSS_SVC_TYPE_MIGRATE, + SSS_SVC_TYPE_PPA, + SSS_SVC_TYPE_MAX, +}; + +struct sss_cmd_func_svc_used_state { + struct sss_mgmt_msg_head head; + u16 func_id; + u16 svc_type; + u8 used_state; + u8 rsvd[35]; +}; + +struct sss_cmd_get_glb_attr { + struct sss_mgmt_msg_head head; + + struct sss_comm_global_attr attr; +}; + +enum sss_fw_ver_type { + SSS_FW_VER_TYPE_BOOT, + SSS_FW_VER_TYPE_MPU, + SSS_FW_VER_TYPE_NPU, + SSS_FW_VER_TYPE_SMU_L0, + SSS_FW_VER_TYPE_SMU_L1, + SSS_FW_VER_TYPE_CFG, +}; + +#define SSS_FW_VERSION_LEN 16 +#define SSS_FW_COMPILE_TIME_LEN 20 +struct sss_cmd_get_fw_version { + struct sss_mgmt_msg_head head; + + u16 fw_type; + u16 rsvd; + u8 ver[SSS_FW_VERSION_LEN]; + u8 time[SSS_FW_COMPILE_TIME_LEN]; +}; + +/* hardware define: ctrlq context */ +struct sss_ctrlq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct sss_cmd_ctrlq_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 ctrlq_id; + u8 rsvd[5]; + + struct sss_ctrlq_ctxt_info ctxt; +}; + +struct sss_cmd_root_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 set_ctrlq_depth; + u8 ctrlq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 rq_depth; + u64 rsvd2; +}; + +struct sss_cmd_wq_page_size { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 page_size; + + u32 rsvd; +}; + +struct sss_cmd_msix_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + +struct sss_cmd_dma_attr_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 entry_id; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv; +}; + +struct sss_cmd_ceq_ctrl_reg { + struct sss_mgmt_msg_head head; + + u16 
func_id; + u16 qid; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct sss_board_info { + u8 board_type; + u8 port_num; + u8 port_speed; + u8 pcie_width; + u8 host_num; + u8 pf_num; + u16 vf_total_num; + u8 tile_num; + u8 qcm_num; + u8 core_num; + u8 work_mode; + u8 service_mode; + u8 pcie_mode; + u8 boot_sel; + u8 board_id; + u32 cfg_addr; + u32 service_en_bitmap; + u8 scenes_id; + u8 cfg_tmpl_id; + u8 hw_id; + u8 rsvd; + u16 pf_vendor_id; + u8 tile_bitmap; + u8 sm_bitmap; +}; + +struct sss_cmd_board_info { + struct sss_mgmt_msg_head head; + + struct sss_board_info info; + u32 rsvd[22]; +}; + +struct sss_cmd_sync_time { + struct sss_mgmt_msg_head head; + + u64 mstime; + u64 rsvd; +}; + +struct sss_cmd_bdf_info { + struct sss_mgmt_msg_head head; + + u16 function_id; + u8 rsvd1[2]; + u8 bus; + u8 device; + u8 function; + u8 rsvd2[5]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h new file mode 100644 index 00000000000000..61ed2206cd3bac --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MGMT_H +#define SSS_HW_MGMT_H + +enum sss_hwdev_init_state { + SSS_HW_NOT_INIT_OK = 0, + SSS_HW_ADM_INIT_OK, + SSS_HW_MBX_INIT_OK, + SSS_HW_CTRLQ_INIT_OK, +}; + +typedef void (*sss_mgmt_msg_handler_t)(void *data, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h new file mode 100644 index 00000000000000..41f053608b353a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SRIOV_H +#define SSS_HW_SRIOV_H + +#include + +struct sss_sriov_state_info { + u8 enable; + u16 vf_num; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h new file mode 100644 index 00000000000000..0dbb4b6963ea71 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_STATISTICS_H +#define SSS_HW_STATISTICS_H + +#include +#include + +#include "sss_hw_event.h" +#include "sss_hw_aeq.h" + +struct sss_qmm_stats { + atomic_t qmm_rsv_cnt[134]; +}; + +struct sss_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct sss_fault_event_stats { + atomic_t chip_fault_stats[22][SSS_FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[SSS_FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct sss_hw_stats { + atomic_t heart_lost_stats; + struct sss_qmm_stats qmm_stats; + struct sss_link_event_stats link_event_stats; + struct sss_fault_event_stats fault_event_stats; + atomic_t nic_ucode_event_stats[SSS_ERR_MAX]; +}; + +#define SSS_CHIP_FAULT_SIZE (110 * 1024) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h new file mode 100644 index 
00000000000000..158ba77fe66358 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SVC_CAP_H +#define SSS_HW_SVC_CAP_H + +#include + +enum sss_service_type { + SSS_SERVICE_TYPE_NIC = 0, + SSS_SERVICE_TYPE_OVS, + SSS_SERVICE_TYPE_ROCE, + SSS_SERVICE_TYPE_TOE, + SSS_SERVICE_TYPE_IOE, + SSS_SERVICE_TYPE_FC, + SSS_SERVICE_TYPE_VBS, + SSS_SERVICE_TYPE_IPSEC, + SSS_SERVICE_TYPE_VIRTIO, + SSS_SERVICE_TYPE_MIGRATE, + SSS_SERVICE_TYPE_PPA, + SSS_SERVICE_TYPE_CUSTOM, + SSS_SERVICE_TYPE_VROCE, + SSS_SERVICE_TYPE_MAX, + + SSS_SERVICE_TYPE_INTF = (1 << 15), + SSS_SERVICE_TYPE_QMM = (1 << 16), +}; + +/* RDMA service capability */ +enum { + SSS_RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + SSS_RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + SSS_RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + SSS_RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + SSS_RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + SSS_RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + SSS_RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + SSS_RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +struct sss_ppa_service_cap { + u16 qpc_pseudo_vf_start; + u16 qpc_pseudo_vf_num; + u32 qpc_pseudo_vf_ctx_num; + u32 pctx_size; /* 512B */ + u32 bloomfilter_len; + u8 bloomfilter_en; + u8 rsvd0; + u16 rsvd1; +}; + +struct sss_vbs_service_cap { + u16 vbs_max_volq; + u16 rsvd1; +}; + +/* PF/VF ToE service resource */ +struct sss_dev_toe_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 max_cctxt; + u32 max_cq; + u16 max_srq; + u32 srq_id_start; + u32 max_mpt; +}; + +/* ToE services */ +struct sss_toe_service_cap { + struct sss_dev_toe_svc_cap dev_toe_cap; + + u8 alloc_flag; + u8 rsvd[3]; + u32 pctx_size; /* 1KB */ + u32 scqc_size; /* 64B */ +}; + +/* PF FC service resource */ +struct sss_dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ */ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services */ +struct sss_fc_service_cap { + struct sss_dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +struct sss_dev_roce_svc_own_cap { + u32 max_qp; + u32 max_cq; + u32 max_srq; + u32 max_mpt; + u32 max_drc_qp; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_size; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_size; + + u32 qpc_entry_size; + u32 max_wqe; + u32 max_rq_sg; + u32 max_sq_inline_data_size; + u32 max_rq_desc_size; + + u32 rdmarc_entry_size; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqe; + u32 reserved_srq; + u32 max_srq_sge; + u32 srqc_entry_size; + + u32 max_msg_size; /* Message size 2GB */ +}; + +/* RDMA service capability */ +struct sss_dev_rdma_svc_cap { + struct sss_dev_roce_svc_own_cap roce_own_cap; +}; + +struct sss_nic_service_cap { + u16 max_sq; + u16 max_rq; + u16 def_queue_num; +}; + 
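/*
 * Illustrative sketch only -- not part of this patch. It shows how a caller
 * might drive the synchronous mailbox helpers declared in sss_hw_mbx.h and
 * sss_hw_mbx_msg.h above (sss_sync_send_msg(), SSS_ASSERT_SEND_MSG_RETURN()
 * and struct sss_cmd_get_fw_version). The function name and the "cmd" value
 * are hypothetical; the real management command IDs are not defined in these
 * headers, so the command number is left to the caller.
 */
static int example_query_mpu_fw_version(void *hwdev, u16 cmd,
					 u8 ver_out[SSS_FW_VERSION_LEN])
{
	struct sss_cmd_get_fw_version fw_ver = {0};
	u16 out_len = sizeof(fw_ver);
	int ret;

	fw_ver.fw_type = SSS_FW_VER_TYPE_MPU;

	/* buf_in and buf_out may reference the same command structure */
	ret = sss_sync_send_msg(hwdev, cmd, &fw_ver, sizeof(fw_ver),
				&fw_ver, &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &fw_ver))
		return ret ? ret : -EIO;

	memcpy(ver_out, fw_ver.ver, SSS_FW_VERSION_LEN);
	return 0;
}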
+/* RDMA services */ +struct sss_rdma_service_cap { + struct sss_dev_rdma_svc_cap dev_rdma_cap; + + /* 1. the number of MTT PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + u8 log_mtt; + + /* Number of MTT table (4M), is actually MTT seg number */ + u32 mtt_num; + + u32 log_mtt_seg; + u32 mtt_entry_size; /* MTT table size 8B, including 1 PA(64bits) */ + u32 mpt_entry_size; /* MPT table size (64B) */ + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + /* 1. the number of RDMArc PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + u8 log_rdmarc; + + u32 reserved_qp; /* Number of reserved QP */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + + /* WQE maximum size of SQ(1024B), inline maximum + * size if 960B(944B aligned to the 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 max_sq_desc_size; + + /* Currently, the supports 64B and 128B, + * defined as 64Bytes + */ + u32 wqebb_size; + + u32 max_cqe; /* Size of the depth of the CQ (64K-1) */ + u32 reserved_cq; /* Number of reserved CQ */ + u32 cqc_entry_size; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrw; /* Number of reserved MR/MR Window */ + + /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + u32 max_fmr_map; + + u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ + + /* Timeout time. Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ + u32 local_ca_ack_delay; + u32 port_num; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 pd_num; /* Maximum number of PD (128K) */ + u32 reserved_pd; /* Number of reserved PD */ + u32 max_xrcd; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcd; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + + /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + u32 gid_entry_size; + + u32 reserved_lkey; /* local_dma_lkey */ + u32 comp_vector_num; /* Number of complete vector (32) */ + u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ + + u32 flag; /* RDMA some identity */ + u32 max_frpl_len; /* Maximum number of pages frmr registration */ + u32 max_pkey; /* Number of supported pkey group */ +}; + +/* PF OVS service resource */ +struct sss_dev_ovs_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_num; + u16 pseudo_vf_start_id; + u8 dynamic_qp_en; +}; + +/* OVS services */ +struct sss_ovs_service_cap { + struct sss_dev_ovs_svc_cap dev_ovs_cap; + + u32 pctx_size; /* 512B */ +}; + +/* PF IPsec service resource */ +struct sss_dev_ipsec_svc_cap { + u32 max_sactx; /* max IPsec SA context num */ + u16 max_cq; /* max IPsec SCQC num */ + u16 rsvd0; +}; + +/* IPsec services */ +struct sss_ipsec_service_cap { + struct sss_dev_ipsec_svc_cap dev_ipsec_cap; + u32 sactx_size; /* 512B */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h new file mode 100644 index 00000000000000..677008109e18f7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_ULD_DRIVER_H +#define SSS_HW_ULD_DRIVER_H + +#include 
"sss_hw_event.h" +#include "sss_hw_svc_cap.h" + +struct sss_hal_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +struct sss_uld_info { + /* When it is unnessary to initialize the uld dev, + * @probe needs to return 0 and uld_dev is set to NULL; + * if uld_dev is NULL, @remove will not be called when uninstalling + */ + int (*probe)(struct sss_hal_dev *hal_dev, void **uld_dev, char *uld_dev_name); + void (*remove)(struct sss_hal_dev *hal_dev, void *uld_dev); + int (*suspend)(struct sss_hal_dev *hal_dev, void *uld_dev, pm_message_t state); + int (*resume)(struct sss_hal_dev *hal_dev, void *uld_dev); + void (*event)(struct sss_hal_dev *hal_dev, void *uld_dev, + struct sss_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); +}; + +/* sss_register_uld - register an upper driver + * @type: uld service type + * @uld_info: uld callback + * + * Registers an upper-layer driver. + * Traverse existing devices and call @probe to initialize the uld device. + */ +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info); + +/** + * sss_unregister_uld - unregister an upper driver + * @type: uld service type + * + * Traverse existing devices and call @remove to uninstall the uld device. + * Unregisters an existing upper-layer driver. + */ +void sss_unregister_uld(enum sss_service_type type); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h new file mode 100644 index 00000000000000..dd9dd0695a15b4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_WQ_H +#define SSS_HW_WQ_H +#include + +#include "sss_hw_common.h" + +struct sss_wq { + u16 ci; + u16 pi; + + u32 q_depth; + u16 id_mask; + u16 elem_size_shift; + u16 page_num; + u16 rsvd1; + u32 elem_per_page; + u16 elem_per_page_shift; + u16 elem_per_page_mask; + + struct sss_dma_addr_align *page; + + dma_addr_t block_paddr; + u64 *block_vaddr; + + void *dev_hdl; + u32 page_size; + u16 elem_size; + u16 rsvd2; +} ____cacheline_aligned; + +#define SSS_WQ_MASK_ID(wq, id) ((id) & (wq)->id_mask) +#define SSS_WQ_MASK_PAGE(wq, pg_id) \ + ((pg_id) < (wq)->page_num ? 
(pg_id) : 0) +#define SSS_WQ_PAGE_ID(wq, id) ((id) >> (wq)->elem_per_page_shift) +#define SSS_WQ_OFFSET_IN_PAGE(wq, id) ((id) & (wq)->elem_per_page_mask) +#define SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, id_in_pg) \ + ((u8 *)(wq)->page[pg_id].align_vaddr + \ + ((id_in_pg) << (wq)->elem_size_shift)) +#define SSS_WQ_IS_0_LEVEL_CLA(wq) ((wq)->page_num == 1) + +static inline u16 sss_wq_free_wqebb(struct sss_wq *wq) +{ + return wq->q_depth - ((wq->q_depth + wq->pi - wq->ci) & wq->id_mask) - 1; +} + +static inline bool sss_wq_is_empty(struct sss_wq *wq) +{ + return SSS_WQ_MASK_ID(wq, wq->pi) == SSS_WQ_MASK_ID(wq, wq->ci); +} + +static inline void *sss_wq_get_one_wqebb(struct sss_wq *wq, u16 *pi) +{ + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi++; + + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, *pi), + SSS_WQ_OFFSET_IN_PAGE(wq, *pi)); +} + +static inline void *sss_wq_get_multi_wqebb(struct sss_wq *wq, + u16 num_wqebbs, u16 *pi, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + u32 pg_id; + u32 off_in_page; + + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi += num_wqebbs; + + pg_id = SSS_WQ_PAGE_ID(wq, *pi); + off_in_page = SSS_WQ_OFFSET_IN_PAGE(wq, *pi); + + if (off_in_page + num_wqebbs > wq->elem_per_page) { + /* wqe across wq page boundary */ + *second_part_wqebbs_addr = + SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_MASK_PAGE(wq, pg_id + 1), 0); + *first_part_wqebbs_num = wq->elem_per_page - off_in_page; + } else { + *second_part_wqebbs_addr = NULL; + *first_part_wqebbs_num = num_wqebbs; + } + + return SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, off_in_page); +} + +static inline void sss_update_wq_ci(struct sss_wq *wq, u16 num_wqebbs) +{ + wq->ci += num_wqebbs; +} + +static inline void *sss_wq_wqebb_addr(struct sss_wq *wq, u16 id) +{ + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, id), + SSS_WQ_OFFSET_IN_PAGE(wq, id)); +} + +static inline void *sss_wq_read_one_wqebb(struct sss_wq *wq, u16 *ci) +{ + *ci = SSS_WQ_MASK_ID(wq, wq->ci); + + return sss_wq_wqebb_addr(wq, *ci); +} + +static inline u64 sss_wq_get_first_wqe_page_addr(struct sss_wq *wq) +{ + return wq->page[0].align_paddr; +} + +static inline void sss_wq_reset(struct sss_wq *wq) +{ + u16 pg_id; + + wq->ci = 0; + wq->pi = 0; + + for (pg_id = 0; pg_id < wq->page_num; pg_id++) + memset(wq->page[pg_id].align_vaddr, 0, wq->page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size); +void sss_destroy_wq(struct sss_wq *wq); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h new file mode 100644 index 00000000000000..e83810dde176cc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EXPORT_H +#define SSS_HWIF_EXPORT_H + +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" + +/** + * @brief sss_alloc_db_addr - alloc doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base); + +/** + * @brief sss_free_db_addr - free doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + **/ +void sss_free_db_addr(void *hwdev, const void __iomem *db_base); + +/* * + * @brief sss_nic_set_msix_auto_mask - set msix auto mask function + * 
@param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix auto_mask flag, 1-enable, 2-clear + */ +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag); + +/* * + * @brief sss_chip_set_msix_state - set msix state + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param flag: msix state flag, 0-enable, 1-disable + */ +void sss_chip_set_msix_state(void *hwdev, u16 msix_id, + enum sss_msix_state flag); + +/* * + * @brief sss_get_global_func_id - get global function id + * @param hwdev: device pointer to hwdev + * @retval global function id + */ +u16 sss_get_global_func_id(void *hwdev); + +/* * + * @brief sss_get_pf_id_of_vf - get pf id of vf + * @param hwdev: device pointer to hwdev + * @retval pf id + */ +u8 sss_get_pf_id_of_vf(void *hwdev); + +/* * + * @brief sss_get_pcie_itf_id - get pcie port id + * @param hwdev: device pointer to hwdev + * @retval pcie port id + */ +u8 sss_get_pcie_itf_id(void *hwdev); + +/* * + * @brief sss_get_func_type - get function type + * @param hwdev: device pointer to hwdev + * @retval function type + */ +enum sss_func_type sss_get_func_type(void *hwdev); + +enum sss_func_type sss_get_func_id(void *hwdev); + +/* * + * @brief sss_get_glb_pf_vf_offset - get vf offset id of pf + * @param hwdev: device pointer to hwdev + * @retval vf offset id + */ +u16 sss_get_glb_pf_vf_offset(void *hwdev); + +/* * + * @brief sss_get_ppf_id - get ppf id + * @param hwdev: device pointer to hwdev + * @retval ppf id + */ +u8 sss_get_ppf_id(void *hwdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h new file mode 100644 index 00000000000000..e8c123f7189ba9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_LINUX_KERNEL_H_ +#define SSS_LINUX_KERNEL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +#include +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +/* ************************************************************************ */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV + +/* ************************************************************************ */ +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif +#define HAVE_INET6_IFADDR_LIST + +/* ************************************************************************ */ +#define HAVE_NDO_GET_STATS64 + +/* ************************************************************************ */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif + +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_SET_PHYS_ID + +/* ************************************************************************ */ +#define HAVE_NETDEV_WANTED_FEAUTES + +/* ************************************************************************ */ +#ifndef 
HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif + +/* ************************************************************************ */ +#define _kc_kmap_atomic(page) kmap_local_page(page) +#define _kc_kunmap_atomic(addr) kunmap_local(addr) + +/* ************************************************************************ */ +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO + +/* ************************************************************************ */ +#define HAVE_NAPI_GRO_FLUSH_OLD + +/* ************************************************************************ */ +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +/* ************************************************************************ */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define HAVE_SKB_INNER_NETWORK_HEADER + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES + +/* ************************************************************************ */ +#define HAVE_VXLAN_CHECKS +#define HAVE_NDO_SELECT_QUEUE_ACCEL + +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#define HAVE_VLAN_FIND_DEV_DEEP_RCU + +/* ************************************************************************ */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#define HAVE_ETH_GET_HEADLEN_FUNC + +/* ************************************************************************ */ +#define HAVE_RXFH_HASHFUNC + +/****************************************************************/ +#define HAVE_NDO_SET_VF_TRUST + +/* ************************************************************** */ +#include + +/* ************************************************************** */ +#define HAVE_IO_MAP_WC_SIZE + +/* ************************************************************************ */ +#define HAVE_NETDEVICE_MIN_MAX_MTU + +/* ************************************************************************ */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA + +/* ************************************************************************ */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_ADM_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK + +/* ************************************************************************ */ +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_XDP_SUPPORT + +/* ************************************************************************ */ +#define HAVE_NDO_BPF_NETDEV_BPF +#define HAVE_TIMER_SETUP +#define HAVE_XDP_DATA_META + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV + +/*****************************************************************************/ +#define dev_open(x) dev_open(x, NULL) +#define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + +#ifndef 
get_ds +#define get_ds() (KERNEL_DS) +#endif + +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _sss_nic_dma_zalloc_coherent(d, s, h, f) +static inline void *_sss_nic_dma_zalloc_coherent(struct device *dev, + size_t size, dma_addr_t *dma_handle, gfp_t gfp) +{ + /* Above kernel 5.0, fixed up all remaining architectures + * to zero the memory in dma_alloc_coherent, and made + * dma_zalloc_coherent a no-op wrapper around dma_alloc_coherent, + * which fixes all of the above issues. + */ + return dma_alloc_coherent(dev, size, dma_handle, gfp); +} +#endif + +struct timeval { + __kernel_old_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ +}; + +#ifndef do_gettimeofday +#define do_gettimeofday(time) _kc_do_gettimeofday(time) +static inline void _kc_do_gettimeofday(struct timeval *tv) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} +#endif + +/*****************************************************************************/ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY +#define ETH_GET_HEADLEN_NEED_DEV + +/*****************************************************************************/ +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) +#endif + +/*****************************************************************************/ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS + +/*****************************************************************************/ +#ifndef rtc_time_to_tm +#define rtc_time_to_tm rtc_time64_to_tm +#endif +#define HAVE_NDO_TX_TIMEOUT_TXQ + +/*****************************************************************************/ +#define SUPPORTED_COALESCE_PARAMS + +#ifndef pci_cleanup_aer_uncorrect_error_status +#define pci_cleanup_aer_uncorrect_error_status pci_aer_clear_nonfatal_status +#endif + +/* ************************************************************************ */ +#define HAVE_XDP_FRAME_SZ + +/* ************************************************************************ */ +#define HAVE_ENCAPSULATION_TSO +#define HAVE_ENCAPSULATION_CSUM + +/* ************************************************************************ */ +#define HAVE_BFP_WARN_NETDEV_PARAM +#define USE_OLD_PCI_FUNCTION +#define CLASS_CREATE_WITH_ONE_PARAM +#define NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define HAS_DEVLINK_ALLOC_SETS_DEV +#define NO_DEVLINK_REGISTER_SETS_DEV +#define DEVLINK_REGISTER_RETURN_VOID +#define devlink_params_publish(x) do {} while (0) +#define devlink_params_unpublish(x) do {} while (0) + +#ifndef eth_zero_addr +static inline void __kc_eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) __kc_eth_zero_addr(_addr) +#endif + +#ifndef netdev_hw_addr_list_for_each +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) +#endif + +#define spin_lock_deinit(lock) + +#define destroy_work(work) + +#ifndef HAVE_TIMER_SETUP +void initialize_timer(const void *adapter_hdl, struct timer_list *timer); +#endif + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +#define destroy_completion(completion) +#define sema_deinit(lock) +#define mutex_deinit(lock) +#define rwlock_deinit(lock) + +#define tasklet_state(tasklet) ((tasklet)->state) + +#ifndef hash_init +#define HASH_SIZE(name) (ARRAY_SIZE(name)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF sizeof_field +#endif + +#ifndef HAVE_TX_TIMEOUT_TXQUEUE +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif + +#define HAS_ETHTOOL_SUPPORTED_COALESCE_PARAMS +#define SSSNIC_SUPPORTED_COALESCE_PARAMS \ + (ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS | \ +ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | \ +ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ +ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ +ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \ +ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH) + +#ifndef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS +#define DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS +#endif + +#if IS_BUILTIN(CONFIG_NET_DEVLINK) +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#endif +#endif + +#endif +/* ************************************************************************ */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h new file mode 100644 index 00000000000000..9a2bf99f0b3c37 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_H +#define SSS_HW_H + +#include "sss_hw_aeq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_ctrlq.h" +#include "sss_hw_common.h" +#include "sss_hw_event.h" +#include "sss_hw_export.h" +#include "sss_hw_irq.h" +#include "sss_hw_mbx.h" +#include "sss_hw_mbx_msg.h" +#include "sss_hw_mgmt.h" +#include "sss_hw_sriov.h" +#include "sss_hw_statistics.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_wq.h" +#include "sss_hwif_export.h" + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h new file mode 100644 index 00000000000000..19b2aa3b7fa3ee --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_KERNEL_H +#define SSS_KERNEL_H + +#include "sss_linux_kernel.h" + +#define sdk_err(dev, format, ...) dev_err(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) dev_warn(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) dev_notice(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) dev_info(dev, "[BASE]" format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) 
dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN +#define USEC_PER_MSEC 1000L +#define MSEC_PER_SEC 1000L + +#endif /* OSSL_KNL_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h new file mode 100644 index 00000000000000..48a2937b3bbd2e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_COMM_H +#define SSS_TOOL_COMM_H + +#define tool_err(format, ...) pr_err(format, ##__VA_ARGS__) +#define tool_warn(format, ...) pr_warn(format, ##__VA_ARGS__) +#define tool_info(format, ...) pr_info(format, ##__VA_ARGS__) + +#define SSS_TOOL_SHOW_ITEM_LEN 32 + +#define SSS_TOOL_VERSION_INFO_LEN 128 + +#define SSS_TOOL_EPERM 1 /* Operation not permitted */ +#define SSS_TOOL_EIO 2 /* I/O error */ +#define SSS_TOOL_EINVAL 3 /* Invalid argument */ +#define SSS_TOOL_EBUSY 4 /* Device or resource busy */ +#define SSS_TOOL_EOPNOTSUPP 0xFF /* Operation not supported */ + +enum sss_tool_driver_cmd_type { + SSS_TOOL_GET_TX_INFO = 1, + SSS_TOOL_GET_Q_NUM, + SSS_TOOL_GET_TX_WQE_INFO, + SSS_TOOL_TX_MAPPING, + SSS_TOOL_GET_RX_INFO, + SSS_TOOL_GET_RX_WQE_INFO, + SSS_TOOL_GET_RX_CQE_INFO, + SSS_TOOL_UPRINT_FUNC_EN, + SSS_TOOL_UPRINT_FUNC_RESET, + SSS_TOOL_UPRINT_SET_PATH, + SSS_TOOL_UPRINT_GET_STATISTICS, + SSS_TOOL_FUNC_TYPE, + SSS_TOOL_GET_FUNC_IDX, + SSS_TOOL_GET_INTER_NUM, + SSS_TOOL_CLOSE_TX_STREAM, + SSS_TOOL_GET_DRV_VERSION, + SSS_TOOL_CLEAR_FUNC_STATS, + SSS_TOOL_GET_HW_STATS, + SSS_TOOL_CLEAR_HW_STATS, + SSS_TOOL_GET_SELF_TEST_RES, + SSS_TOOL_GET_CHIP_FAULT_STATS, + SSS_TOOL_NIC_RSVD1, + SSS_TOOL_NIC_RSVD2, + SSS_TOOL_NIC_RSVD3, + SSS_TOOL_GET_CHIP_ID, + SSS_TOOL_GET_SINGLE_CARD_INFO, + SSS_TOOL_GET_FIRMWARE_ACTIVE_STATUS, + SSS_TOOL_ROCE_DFX_FUNC, + SSS_TOOL_GET_DEVICE_ID, + SSS_TOOL_GET_PF_DEV_INFO, + SSS_TOOL_CMD_FREE_MEM, + SSS_TOOL_GET_LOOPBACK_MODE = 32, + SSS_TOOL_SET_LOOPBACK_MODE, + SSS_TOOL_SET_LINK_MODE, + SSS_TOOL_SET_PF_BW_LIMIT, + SSS_TOOL_GET_PF_BW_LIMIT, + SSS_TOOL_ROCE_CMD, + SSS_TOOL_GET_POLL_WEIGHT, + SSS_TOOL_SET_POLL_WEIGHT, + SSS_TOOL_GET_HOMOLOGUE, + SSS_TOOL_SET_HOMOLOGUE, + SSS_TOOL_GET_SSET_COUNT, + SSS_TOOL_GET_SSET_ITEMS, + SSS_TOOL_IS_DRV_IN_VM, + SSS_TOOL_LRO_ADPT_MGMT, + SSS_TOOL_SET_INTER_COAL_PARAM, + SSS_TOOL_GET_INTER_COAL_PARAM, + SSS_TOOL_GET_CHIP_INFO, + SSS_TOOL_GET_NIC_STATS_LEN, + SSS_TOOL_GET_NIC_STATS_STRING, + SSS_TOOL_GET_NIC_STATS_INFO, + SSS_TOOL_GET_PF_ID, + SSS_TOOL_NIC_RSVD4, + SSS_TOOL_NIC_RSVD5, + SSS_TOOL_DCB_QOS_INFO, + SSS_TOOL_DCB_PFC_STATE, + SSS_TOOL_DCB_ETS_STATE, + SSS_TOOL_DCB_STATE, + SSS_TOOL_QOS_DEV, + SSS_TOOL_GET_QOS_COS, + SSS_TOOL_GET_ULD_DEV_NAME, + SSS_TOOL_GET_TX_TIMEOUT, + SSS_TOOL_SET_TX_TIMEOUT, + + SSS_TOOL_RSS_CFG = 0x40, + SSS_TOOL_RSS_INDIR, + SSS_TOOL_PORT_ID, + + SSS_TOOL_GET_FUNC_CAP = 0x50, + SSS_TOOL_GET_XSFP_PRESENT = 0x51, + SSS_TOOL_GET_XSFP_INFO = 0x52, + SSS_TOOL_DEV_NAME_TEST = 0x53, + + SSS_TOOL_GET_WIN_STAT = 0x60, + SSS_TOOL_WIN_CSR_READ = 0x61, + SSS_TOOL_WIN_CSR_WRITE = 0x62, + SSS_TOOL_WIN_API_CMD_RD = 0x63, + + SSS_TOOL_VM_COMPAT_TEST = 0xFF +}; + +struct sss_tool_show_item { + char name[SSS_TOOL_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; 
+ u64 value; +}; + +struct sss_tool_drv_version_info { + char ver[SSS_TOOL_VERSION_INFO_LEN]; +}; + +#endif /* _SSS_NIC_MT_H_ */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_version.h b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h new file mode 100644 index 00000000000000..6b6edef780d97b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_VERSION_H +#define SSS_VERSION_H + +#define SSS_VERSION_STR "1.1.0.0" + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/Makefile b/drivers/net/ethernet/3snic/sssnic/nic/Makefile new file mode 100644 index 00000000000000..453333021a2f27 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/Makefile @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 3SNIC +# + +SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S) +ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\" + +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/include + +ccflags-y += -Werror + +obj-$(CONFIG_SSSNIC) += sssnic.o +sssnic-y := sss_nic_main.o \ + sss_nic_tx.o \ + sss_nic_tx_init.o \ + sss_nic_rx.o \ + sss_nic_rx_init.o \ + sss_nic_rx_reset.o \ + sss_nic_rss.o \ + sss_nic_ntuple.o \ + sss_nic_dcb.o \ + sss_nic_ethtool.o \ + sss_nic_ethtool_api.o \ + sss_nic_ethtool_stats.o \ + sss_nic_ethtool_stats_api.o \ + sss_nic_irq.o \ + sss_nic_filter.o \ + sss_nic_netdev_ops.o \ + sss_nic_cfg.o \ + sss_nic_mag_cfg.o \ + sss_nic_vf_cfg.o \ + sss_nic_rss_cfg.o \ + sss_nic_event.o \ + sss_nic_io.o \ + sss_nic_netdev_ops_api.o \ + ./tool/sss_tool_nic_func.o \ + ./tool/sss_tool_nic_dcb.o \ + ./tool/sss_tool_nic_phy_attr.o \ + ./tool/sss_tool_nic_qp_info.o \ + ./tool/sss_tool_nic_stats.o \ + ../hw/sss_hw_main.o \ + ../hw/sss_pci.o \ + ../hw/sss_pci_probe.o \ + ../hw/sss_pci_remove.o \ + ../hw/sss_pci_shutdown.o \ + ../hw/sss_pci_error.o \ + ../hw/sss_pci_sriov.o \ + ../hw/sss_pci_global.o \ + ../hw/sss_hwdev_api.o \ + ../hw/sss_hwdev_cap.o \ + ../hw/sss_hwdev_export.o \ + ../hw/sss_hwdev_link.o \ + ../hw/sss_hwdev_init.o \ + ../hw/sss_hwdev_mgmt_info.o \ + ../hw/sss_hwdev_mgmt_channel.o \ + ../hw/sss_hwdev_io_flush.o \ + ../hw/sss_hwif_ctrlq.o \ + ../hw/sss_hwif_ctrlq_init.o \ + ../hw/sss_hwif_ctrlq_export.o \ + ../hw/sss_hwif_mbx.o \ + ../hw/sss_hwif_mbx_init.o \ + ../hw/sss_hwif_mbx_export.o \ + ../hw/sss_hwif_adm.o \ + ../hw/sss_hwif_adm_init.o \ + ../hw/sss_hwif_init.o \ + ../hw/sss_hwif_api.o \ + ../hw/sss_hwif_export.o \ + ../hw/sss_hwif_eq.o \ + ../hw/sss_hwif_mgmt_init.o \ + ../hw/sss_hwif_irq.o \ + ../hw/sss_hwif_aeq.o \ + ../hw/sss_common.o \ + ../hw/sss_wq.o \ + ../hw/sss_hwif_ceq.o \ + ../hw/sss_adapter_mgmt.o \ + 
../hw/tool/sss_tool_main.o \ + ../hw/tool/sss_tool_chip.o \ + ../hw/tool/sss_tool_sdk.o \ + ../hw/tool/sss_tool_sm.o diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h new file mode 100644 index 00000000000000..21b4612f06860f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_DEFINE_H +#define SSS_NIC_CFG_DEFINE_H + +#include "sss_hw_mbx_msg.h" +#include "sss_nic_cfg_mag_define.h" +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_cfg_rss_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tcam_define.h" + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif + +#define SSSNIC_MBX_OPCODE_SET 1 +#define SSSNIC_MBX_OPCODE_GET 0 + +#define SSSNIC_MBX_OPCODE_ADD 1 +#define SSSNIC_MBX_OPCODE_DEL 0 + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif + +#define SSSNIC_MIN_MTU_SIZE 256 + +#define SSSNIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define SSSNIC_PF_SET_VF_ALREADY 0x4 + +#define SSSNIC_LOWEST_LATENCY 1 + +#define SSSNIC_MAX_FEATURE_QWORD 4 + +#define SSSNIC_MBX_OPCODE_GET_DCB_STATE 0 +#define SSSNIC_MBX_OPCODE_SET_DCB_STATE 1 +#define SSSNIC_DCB_STATE_DISABLE 0 +#define SSSNIC_DCB_STATE_ENABLE 1 + +#define SSSNIC_STD_SFP_INFO_MAX_SIZE 640 + +#define SSSNIC_BIOS_SIGNATURE 0x1923E518 +#define SSSNIC_BIOS_FUN_VALID 1 +#define SSSNIC_BIOS_FUN_INVALID 0 + +enum sss_nic_func_tbl_cfg_type { + SSSNIC_FUNC_CFG_TYPE_INIT, + SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE, + SSSNIC_FUNC_CFG_TYPE_MTU, +}; + +enum sss_nic_feature_cap { + SSSNIC_F_CSUM = BIT(0), + SSSNIC_F_SCTP_CRC = BIT(1), + SSSNIC_F_TSO = BIT(2), + SSSNIC_F_LRO = BIT(3), + SSSNIC_F_UFO = BIT(4), + SSSNIC_F_RSS = BIT(5), + SSSNIC_F_RX_VLAN_FILTER = BIT(6), + SSSNIC_F_RX_VLAN_STRIP = BIT(7), + SSSNIC_F_TX_VLAN_INSERT = BIT(8), + SSSNIC_F_VXLAN_OFFLOAD = BIT(9), + SSSNIC_F_IPSEC_OFFLOAD = BIT(10), + SSSNIC_F_FDIR = BIT(11), + SSSNIC_F_PROMISC = BIT(12), + SSSNIC_F_ALLMULTI = BIT(13), + SSSNIC_F_XSFP_REPORT = BIT(14), + SSSNIC_F_VF_MAC = BIT(15), + SSSNIC_F_RATE_LIMIT = BIT(16), + SSSNIC_F_RXQ_RECOVERY = BIT(17), +}; + +/* BIOS CONF */ +enum { + SSSNIC_NVM_PF_SPEED_LIMIT = BIT(6), +}; + +/* Commands between NIC to MPU */ +enum sss_nic_mbx_opcode { + SSSNIC_MBX_OPCODE_VF_REGISTER = 0, /* only for PFD and VFD */ + + /* FUNC CFG */ + SSSNIC_MBX_OPCODE_SET_FUNC_TBL = 5, + SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + SSSNIC_MBX_OPCODE_SET_RX_MODE, + SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET, + SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAN_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + SSSNIC_MBX_OPCODE_CFG_FLEX_QUEUE, + /* LRO CFG */ + SSSNIC_MBX_OPCODE_CFG_RX_LRO, + SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + SSSNIC_MBX_OPCODE_FEATURE_NEGO, + SSSNIC_MBX_OPCODE_CFG_LOCAL_LRO_STATE, + + SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + /* MAC & VLAN CFG */ + SSSNIC_MBX_OPCODE_GET_MAC = 20, + SSSNIC_MBX_OPCODE_SET_MAC, + SSSNIC_MBX_OPCODE_DEL_MAC, + SSSNIC_MBX_OPCODE_UPDATE_MAC, + SSSNIC_MBX_OPCODE_GET_ALL_DEFAULT_MAC, + + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + SSSNIC_MBX_OPCODE_SMAC_CHECK_STATE, + + /* SR-IOV */ + SSSNIC_MBX_OPCODE_CFG_VF_VLAN = 40, + SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + /* RATE LIMIT */ + SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + + /* RSS CFG */ + SSSNIC_MBX_OPCODE_RSS_CFG = 60, + SSSNIC_MBX_OPCODE_RSS_TEMP_MGR, + 
SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + + /* IP checksum error packets, enable rss quadruple hash */ + SSSNIC_MBX_OPCODE_IPCS_ERR_RSS_ENABLE_OP = 66, + + /* PPA/FDIR */ + SSSNIC_MBX_OPCODE_ADD_TC_FLOW = 80, + SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + SSSNIC_MBX_OPCODE_GET_TC_FLOW, + SSSNIC_MBX_OPCODE_FLUSH_TCAM, + SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_ENABLE_TCAM, + SSSNIC_MBX_OPCODE_GET_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_CFG_PPA_TABLE_ID, + SSSNIC_MBX_OPCODE_SET_PPA_EN = 88, + SSSNIC_MBX_OPCODE_CFG_PPA_MODE, + SSSNIC_MBX_OPCODE_CFG_PPA_FLUSH, + SSSNIC_MBX_OPCODE_SET_FDIR_STATUS, + SSSNIC_MBX_OPCODE_GET_PPA_COUNTER, + + /* PORT CFG */ + SSSNIC_MBX_OPCODE_SET_PORT_ENABLE = 100, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + + SSSNIC_MBX_OPCODE_SET_PORT_CAR, + SSSNIC_MBX_OPCODE_SET_ER_DROP_PKT, + + SSSNIC_MBX_OPCODE_GET_VF_COS, + SSSNIC_MBX_OPCODE_SETUP_COS_MAPPING, + SSSNIC_MBX_OPCODE_SET_ETS, + SSSNIC_MBX_OPCODE_SET_PFC, + SSSNIC_MBX_OPCODE_QOS_ETS, + SSSNIC_MBX_OPCODE_QOS_PFC, + SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + SSSNIC_MBX_OPCODE_QOS_PORT_CFG, + SSSNIC_MBX_OPCODE_QOS_MAP_CFG, + SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE = 118, + SSSNIC_MBX_OPCODE_INQUIRT_PAUSE_CFG = 119, + + /* MISC */ + SSSNIC_MBX_OPCODE_BIOS_CFG = 120, + SSSNIC_MBX_OPCODE_SET_FIRMWARE_CUSTOM_PACKETS_MSG, + + /* BOND */ + SSSNIC_MBX_OPCODE_BOND_DEV_CREATE = 134, + SSSNIC_MBX_OPCODE_BOND_DEV_DELETE, + SSSNIC_MBX_OPCODE_BOND_DEV_OPEN_CLOSE, + SSSNIC_MBX_OPCODE_BOND_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + + /* DFX */ + SSSNIC_MBX_OPCODE_GET_SM_TABLE = 140, + SSSNIC_MBX_OPCODE_RD_LINE_TBL, + + SSSNIC_MBX_OPCODE_SET_UCAPTURE_OPT = 160, + SSSNIC_MBX_OPCODE_SET_VHD_CFG, + + /* move to SSSLINK */ + SSSNIC_MBX_OPCODE_GET_PORT_STAT = 200, + SSSNIC_MBX_OPCODE_CLEAN_PORT_STAT, + SSSNIC_MBX_OPCODE_CFG_LOOPBACK_MODE, + SSSNIC_MBX_OPCODE_GET_SFP_QSFP_INFO, + SSSNIC_MBX_OPCODE_SET_SFP_STATUS, + SSSNIC_MBX_OPCODE_GET_LIGHT_MODULE_ABS, + SSSNIC_MBX_OPCODE_GET_LINK_INFO, + SSSNIC_MBX_OPCODE_CFG_AN_TYPE, + SSSNIC_MBX_OPCODE_GET_PORT_INFO, + SSSNIC_MBX_OPCODE_SET_LINK_SETTINGS, + SSSNIC_MBX_OPCODE_ACTIVATE_BIOS_LINK_CFG, + SSSNIC_MBX_OPCODE_RESTORE_LINK_CFG, + SSSNIC_MBX_OPCODE_SET_LINK_FOLLOW, + SSSNIC_MBX_OPCODE_GET_LINK_STATE, + SSSNIC_MBX_OPCODE_LINK_STATUS_REPORT, + SSSNIC_MBX_OPCODE_CABLE_PLUG_EVENT, + SSSNIC_MBX_OPCODE_LINK_ERR_EVENT, + SSSNIC_MBX_OPCODE_SET_LED_STATUS, + + SSSNIC_MBX_OPCODE_MAX = 256, +}; + +/* NIC CTRLQ MODE */ +enum sss_nic_ctrlq_opcode { + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX = 0, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + SSSNIC_CTRLQ_OPCODE_ARM_SQ, + SSSNIC_CTRLQ_OPCODE_ARM_RQ, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_IQ_ENABLE, + SSSNIC_CTRLQ_OPCODE_SET_RQ_FLUSH = 10, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, + SSSNIC_CTRLQ_OPCODE_PPA_HASH_TABLE, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET = 13, +}; + +struct sss_nic_rq_pc_info { + u16 hw_pi; + u16 hw_ci; +}; + +struct sss_nic_rq_hw_info { + u32 func_id; + u32 num_queues; + u32 rsvd[14]; +}; + +struct sss_nic_mbx_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 feature[SSSNIC_MAX_FEATURE_QWORD]; +}; + +struct sss_nic_mbx_mac_addr { + struct 
sss_mgmt_msg_head head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_mac_update { + struct sss_nic_mbx_mac_addr old_mac; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_vport_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u8 state; /* 0--disable, 1--enable */ + u8 rsvd2[3]; +}; + +struct sss_nic_mbx_clear_qp_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_invalid_qp_cache { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_port_stats_info { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_port_stats { + u64 tx_unicast_pkts; + u64 tx_unicast_bytes; + u64 tx_multicast_pkts; + u64 tx_multicast_bytes; + u64 tx_broadcast_pkts; + u64 tx_broadcast_bytes; + + u64 rx_unicast_pkts; + u64 rx_unicast_bytes; + u64 rx_multicast_pkts; + u64 rx_multicast_bytes; + u64 rx_broadcast_pkts; + u64 rx_broadcast_bytes; + + u64 tx_discard; + u64 rx_discard; + u64 tx_err; + u64 rx_err; +}; + +struct sss_nic_mbx_port_stats { + struct sss_mgmt_msg_head head; + + u32 stats_size; + u32 rsvd1; + struct sss_nic_port_stats stats; + u64 rsvd2[6]; +}; + +struct sss_nic_func_table_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct sss_nic_mbx_set_func_table { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u32 cfg_bitmap; + struct sss_nic_func_table_cfg tbl_cfg; +}; + +struct sss_nic_mbx_intr_attr { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_id; + u32 l2nic_sqn; + u32 rsvd; + u64 ci_addr; +}; + +struct sss_nic_mbx_offload_vlan { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 vlan_offload; + u8 rsvd1[5]; +}; + +struct sss_nic_mbx_lro_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_pkt_len; /* unit is 1K */ + u8 resv2[13]; +}; + +struct sss_nic_mbx_lro_timer { + struct sss_mgmt_msg_head head; + + u8 opcode; /* 1: set timer value, 0: get timer value */ + u8 rsvd1; + u16 rsvd2; + u32 timer; +}; + +struct sss_nic_mbx_vf_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u8 qos; + u8 rsvd2[5]; +}; + +struct sss_nic_mbx_set_spoofchk { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 state; + u8 rsvd1; +}; + +struct sss_nic_mbx_tx_rate_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_attach_vf { + struct sss_mgmt_msg_head head; + + u8 op_register; /* 0 - unregister, 1 - register */ + u8 rsvd1[3]; + u32 extra_feature; + u8 rsvd2[32]; +}; + +struct sss_nic_mbx_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u16 rsvd2; +}; + +/* set vlan filter */ +struct sss_nic_mbx_vlan_filter_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 resvd[2]; + u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ +}; + +struct sss_nic_mbx_force_drop_pkt { + struct sss_mgmt_msg_head head; + + u8 port; + u8 rsvd1[3]; +}; + +struct sss_nic_mbx_set_rx_mode { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct sss_nic_mbx_rss_ctx { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 context; +}; + +struct sss_nic_mbx_rss_engine_cfg { + 
struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct sss_nic_mbx_rss_key_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 key[SSSNIC_RSS_KEY_SIZE]; +}; + +struct sss_nic_mbx_rss_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 rss_en; + u8 rq_priority_number; + u8 prio_tc[SSSNIC_DCB_COS_MAX]; + u16 qp_num; + u16 rsvd1; +}; + +struct sss_nic_mbx_vf_dcb_cfg { + struct sss_mgmt_msg_head head; + + struct sss_nic_dcb_info dcb_info; +}; + +struct sss_nic_mbx_dcb_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 port_state; /* 0 - disable, 1 - enable dcb */ + u8 rsvd[7]; +}; + +struct sss_nic_mbx_pause_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; + u16 rsvd1; + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; + u8 rsvd2[5]; +}; + +/* pfc/pause tx abnormal */ +struct sss_nic_msg_tx_pause_info { + struct sss_mgmt_msg_head head; + + u32 tx_pause_except; /* 1: abnormal, 0: normal */ + u32 except_level; + u32 rsvd; +}; + +struct sss_nic_mbx_set_tcam_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 tcam_enable; + u8 rsvd1; + u32 rsvd2; +}; + +/* alloc tcam block output struct */ +struct sss_nic_mbx_tcam_block_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u8 alloc_en; + u8 tcam_type; /* 0: 16 size tcam block, 1: 0 size tcam block */ + u16 tcam_block_index; + u16 mpu_alloc_block_size; +}; + +struct sss_nic_mbx_flush_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u16 rsvd; +}; + +struct sss_nic_mbx_add_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + struct sss_nic_tcam_rule_cfg rule; +}; + +struct sss_nic_mbx_del_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + u32 index_start; + u32 index_num; +}; + +/* note: must be 4 byte aligned */ +struct sss_nic_bios_cfg { + u32 signature; /* check flash data valid */ + u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ + u8 extend_mode; + u8 rsvd0[2]; + u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - enable */ + u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ + u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ + u32 service_mode; /* refer to CHIPIF_SERVICE_MODE_x macro */ + u32 pf_bw; /* PF rate, percent 0-100 */ + u8 speed; /* enum of port speed */ + u8 auto_neg; /* 0 - invalid 1 - open 2 - close */ + u8 lanes; /* lane num */ + u8 fec; /* FEC mode, refer to enum mag_cmd_port_fec */ + u8 auto_adapt; /* 0 - invalid 1 - open 2 - close */ + u8 func_valid; /* 0 - func_id is invalid, other - func_id is valid */ + u8 func_id; + u8 sriov_en; /* SRIOV-EN: 0 - invalid, 1 - open, 2 - close */ +}; + +struct sss_nic_mbx_bios_cfg { + struct sss_mgmt_msg_head head; + u32 op_code; /* Operation Code: Bit0 - 0: read, 1: write; Bit1-6: cfg_mask */ + struct sss_nic_bios_cfg bios_cfg; +}; + +/* lacp status update */ +struct sss_nic_msg_bond_active_info { + struct sss_mgmt_msg_head head; + u32 bond_id; + u32 bon_mmi_status; /* bond link state */ + u32 active_bitmap; /* slave port state */ + + u8 rsvd[16]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h new file mode 100644 index 00000000000000..73bbeb34f6429f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h @@ -0,0 +1,460 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_MAG_DEFINE_H +#define SSS_NIC_CFG_MAG_DEFINE_H + +#include +#include +#include + +#include "sss_hw_mbx_msg.h" + +/* * + * Definition of the NIC receiving mode + */ +#define SSSNIC_RX_MODE_UC 0x01 +#define SSSNIC_RX_MODE_MC 0x02 +#define SSSNIC_RX_MODE_BC 0x04 +#define SSSNIC_RX_MODE_MC_ALL 0x08 +#define SSSNIC_RX_MODE_PROMISC 0x10 + +#define SSSNIC_RX_RATE_LOW 200000 +#define SSSNIC_RX_COAL_TIME_LOW 25 +#define SSSNIC_RX_PENDING_LIMIT_LOW 2 + +#define SSSNIC_RX_RATE_HIGH 700000 +#define SSSNIC_RX_COAL_TIME_HIGH 225 +#define SSSNIC_RX_PENDING_LIMIT_HIGH 8 + +#define SSSNIC_RX_RATE_THRESH 50000 +#define SSSNIC_TX_RATE_THRESH 50000 +#define SSSNIC_RX_RATE_LOW_VM 100000 +#define SSSNIC_RX_PENDING_LIMIT_HIGH_VM 87 + +#define SSSNIC_MAX_LIMIT_BW 100 + +#define SSSNIC_MAG_OPCODE_PORT_DISABLE 0x0 +#define SSSNIC_MAG_OPCODE_TX_ENABLE 0x1 +#define SSSNIC_MAG_OPCODE_RX_ENABLE 0x2 + +#define SSSNIC_XSFP_INFO_MAX_SIZE 640 + +#define SSNSIC_PORT_PRESENT 0 +#define SSNSIC_PORT_ABSENT 1 + +enum sss_nic_valid_link_settings { + SSSNIC_LINK_SET_SPEED = 0x1, + SSSNIC_LINK_SET_AUTONEG = 0x2, + SSSNIC_LINK_SET_FEC = 0x4, +}; + +enum sss_nic_link_follow_status { + SSSNIC_LINK_FOLLOW_DEFAULT, + SSSNIC_LINK_FOLLOW_PORT, + SSSNIC_LINK_FOLLOW_SEPARATE, + SSSNIC_LINK_FOLLOW_STATUS_MAX, +}; + +/* serdes/mag message cmd define */ +enum sss_nic_mag_opcode { + SSSNIC_MAG_OPCODE_SERDES_PROCESS = 0, + + /* port configure, 0-29 */ + SSSNIC_MAG_OPCODE_SET_PORT_CFG = 1, + SSSNIC_MAG_OPCODE_SET_PORT_ADAPT = 2, + SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE = 3, + + SSSNIC_MAG_OPCODE_GET_PORT_ENABLE = 5, + SSSNIC_MAG_OPCODE_SET_PORT_ENABLE = 6, + SSSNIC_MAG_OPCODE_LINK_STATUS = 7, + SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW = 8, + SSSNIC_MAG_OPCODE_SET_PMA_ENABLE = 9, + SSSNIC_MAG_OPCODE_CFG_FEC_MODE = 10, + + SSSNIC_MAG_OPCODE_CFG_AN_TYPE = 12, /* reserved for future use */ + SSSNIC_MAG_OPCODE_CFG_LINK_TIME = 13, + + /* bios link, 30-49 */ + SSSNIC_MAG_OPCODE_CFG_BIOS_LINK_CFG = 31, + SSSNIC_MAG_OPCODE_RESTORE_LINK_CFG = 32, + SSSNIC_MAG_OPCODE_ACTIVATE_BIOS_LINK_CFG = 33, + + /* LED */ + SSSNIC_MAG_OPCODE_SET_LED_CFG = 50, + + /* PHY */ + SSSNIC_MAG_OPCODE_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ + + /* sfp */ + SSSNIC_MAG_OPCODE_GET_XSFP_INFO = 60, + SSSNIC_MAG_OPCODE_SET_XSFP_ENABLE = 61, + SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT = 62, + /* sfp/qsfp single byte read/write, for equipment test */ + SSSNIC_MAG_OPCODE_SET_XSFP_RW = 63, + SSSNIC_MAG_OPCODE_CFG_XSFP_TEMPERATURE = 64, + + /* event 100-149 */ + SSSNIC_MAG_OPCODE_WIRE_EVENT = 100, + SSSNIC_MAG_OPCODE_LINK_ERR_EVENT = 101, + + /* DFX、Counter */ + SSSNIC_MAG_OPCODE_EVENT_PORT_INFO = 150, + SSSNIC_MAG_OPCODE_GET_PORT_STAT = 151, + SSSNIC_MAG_OPCODE_CLR_PORT_STAT = 152, + SSSNIC_MAG_OPCODE_GET_PORT_INFO = 153, + SSSNIC_MAG_OPCODE_GET_PCS_ERR_CNT = 154, + SSSNIC_MAG_OPCODE_GET_MAG_CNT = 155, + SSSNIC_MAG_OPCODE_DUMP_ANTRAIN_INFO = 156, + + SSSNIC_MAG_OPCODE_MAX = 0xFF +}; + +enum sss_nic_mag_opcode_port_speed { + SSSNIC_PORT_SPEED_NOT_SET = 0, + SSSNIC_PORT_SPEED_10MB = 1, + SSSNIC_PORT_SPEED_100MB = 2, + SSSNIC_PORT_SPEED_1GB = 3, + SSSNIC_PORT_SPEED_10GB = 4, + SSSNIC_PORT_SPEED_25GB = 5, + SSSNIC_PORT_SPEED_40GB = 6, + SSSNIC_PORT_SPEED_50GB = 7, + SSSNIC_PORT_SPEED_100GB = 8, + SSSNIC_PORT_SPEED_200GB = 9, + SSSNIC_PORT_SPEED_UNKNOWN +}; + +enum sss_nic_mag_opcode_port_an { + SSSNIC_PORT_AN_NOT_SET = 0, + SSSNIC_PORT_CFG_AN_ON = 1, + SSSNIC_PORT_CFG_AN_OFF = 2 
+}; + +/* mag supported/advertised link mode bitmap */ +enum mag_cmd_link_mode { + SSSNIC_LINK_MODE_GE = 0, + SSSNIC_LINK_MODE_10GE_BASE_R = 1, + SSSNIC_LINK_MODE_25GE_BASE_R = 2, + SSSNIC_LINK_MODE_40GE_BASE_R4 = 3, + SSSNIC_LINK_MODE_50GE_BASE_R = 4, + SSSNIC_LINK_MODE_50GE_BASE_R2 = 5, + SSSNIC_LINK_MODE_100GE_BASE_R = 6, + SSSNIC_LINK_MODE_100GE_BASE_R2 = 7, + SSSNIC_LINK_MODE_100GE_BASE_R4 = 8, + SSSNIC_LINK_MODE_200GE_BASE_R2 = 9, + SSSNIC_LINK_MODE_200GE_BASE_R4 = 10, + SSSNIC_LINK_MODE_MAX_NUMBERS, + + SSSNIC_LINK_MODE_UNKNOWN = 0xFFFF +}; + +/* led type */ +enum sss_nic_mag_led_type { + SSSNIC_MAG_LED_TYPE_ALARM = 0x0, + SSSNIC_MAG_LED_TYPE_LOW_SPEED = 0x1, + SSSNIC_MAG_LED_TYPE_HIGH_SPEED = 0x2 +}; + +/* led mode */ +enum sss_nic_mag_led_mode { + SSSNIC_MAG_LED_DEFAULT = 0x0, + SSSNIC_MAG_LED_FORCE_ON = 0x1, + SSSNIC_MAG_LED_FORCE_OFF = 0x2, + SSSNIC_MAG_LED_FORCE_BLINK_1HZ = 0x3, + SSSNIC_MAG_LED_FORCE_BLINK_2HZ = 0x4, + SSSNIC_MAG_LED_FORCE_BLINK_4HZ = 0x5, + SSSNIC_MAG_LED_1HZ = 0x6, + SSSNIC_MAG_LED_2HZ = 0x7, + SSSNIC_MAG_LED_4HZ = 0x8 +}; + +/* xsfp wire type, refer to cmis protocol definition */ +enum sss_nic_mag_wire_type { + SSSNIC_MAG_WIRE_TYPE_UNKNOWN = 0x0, + SSSNIC_MAG_WIRE_TYPE_MM = 0x1, + SSSNIC_MAG_WIRE_TYPE_SM = 0x2, + SSSNIC_MAG_WIRE_TYPE_COPPER = 0x3, + SSSNIC_MAG_WIRE_TYPE_ACC = 0x4, + SSSNIC_MAG_WIRE_TYPE_BASET = 0x5, + SSSNIC_MAG_WIRE_TYPE_AOC = 0x40, + SSSNIC_MAG_WIRE_TYPE_ELECTRIC = 0x41, + SSSNIC_MAG_WIRE_TYPE_BACKPLANE = 0x42 +}; + +enum sss_nic_link_status { + SSSNIC_LINK_DOWN = 0, + SSSNIC_LINK_UP +}; + +struct sss_nic_link_ksettings { + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +struct sss_nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u32 supported_mode; + u32 advertised_mode; +}; + +struct sss_nic_pause_cfg { + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; +}; + +struct sss_nic_mbx_mag_set_port_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 config_bitmap; + u8 speed; + u8 autoneg; + u8 fec; + u8 lanes; + u8 rsvd1[20]; +}; + +struct sss_nic_mbx_get_port_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u8 wire_type; + u8 an_support; + u8 an_en; + u8 duplex; + + u8 speed; + u8 fec; + u8 lanes; + u8 rsvd1; + + u32 supported_mode; + u32 advertised_mode; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_loopback_mode { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get loopback mode 1:set loopback mode */ + u8 mode; + u8 en; /* 0:disable 1:enable */ + + u32 rsvd0[2]; +}; + +struct sss_nic_mbx_set_port_mag_state { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd1[3]; +}; + +/* the physical port disable link follow only when all pf of the port are set to follow disable */ +struct sss_nic_mbx_set_link_follow { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 follow; + u8 rsvd1[3]; +}; + +/* firmware also use this cmd report link event to driver */ +struct sss_nic_mbx_get_link_state { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:link down 1:link up */ + u8 rsvd0[2]; +}; + +/* the led is report alarm when any pf of the port is alram */ +struct sss_nic_mbx_set_led_cfg { + struct 
sss_mgmt_msg_head head; + + u16 function_id; + u8 type; + u8 mode; +}; + +struct sss_nic_mbx_get_xsfp_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 wire_type; + u16 out_len; + u32 rsvd; + u8 sfp_info[SSSNIC_XSFP_INFO_MAX_SIZE]; +}; + +struct sss_nic_mbx_get_xsfp_present { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_cache_port_sfp { + u8 mpu_send_sfp_info; + u8 mpu_send_sfp_abs; + u8 rsvd[2]; + struct sss_nic_mbx_get_xsfp_info std_sfp_info; + struct sss_nic_mbx_get_xsfp_present abs; +}; + +/* xsfp plug event */ +struct sss_nic_mag_wire_event { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_mag_port_stats { + u64 tx_fragment_pkts; + u64 tx_undersize_pkts; + u64 tx_undermin_pkts; + u64 tx_64_oct_pkts; + u64 tx_65_127_oct_pkts; + u64 tx_128_255_oct_pkts; + u64 tx_256_511_oct_pkts; + u64 tx_512_1023_oct_pkts; + u64 tx_1024_1518_oct_pkts; + u64 tx_1519_2047_oct_pkts; + u64 tx_2048_4095_oct_pkts; + u64 tx_4096_8191_oct_pkts; + u64 tx_8192_9216_oct_pkts; + u64 tx_9217_12287_oct_pkts; + u64 tx_12288_16383_oct_pkts; + u64 tx_1519_max_bad_pkts; + u64 tx_1519_max_good_pkts; + u64 tx_oversize_pkts; + u64 tx_jabber_pkts; + u64 tx_bad_pkts; + u64 tx_bad_octs; + u64 tx_good_pkts; + u64 tx_good_octs; + u64 tx_total_pkts; + u64 tx_total_octs; + u64 tx_uni_pkts; + u64 tx_multi_pkts; + u64 tx_broad_pkts; + u64 tx_pauses; + u64 tx_pfc_pkts; + u64 tx_pfc_pri0_pkts; + u64 tx_pfc_pri1_pkts; + u64 tx_pfc_pri2_pkts; + u64 tx_pfc_pri3_pkts; + u64 tx_pfc_pri4_pkts; + u64 tx_pfc_pri5_pkts; + u64 tx_pfc_pri6_pkts; + u64 tx_pfc_pri7_pkts; + u64 tx_control_pkts; + u64 tx_err_all_pkts; + u64 tx_from_app_good_pkts; + u64 tx_from_app_bad_pkts; + + u64 rx_fragment_pkts; + u64 rx_undersize_pkts; + u64 rx_undermin_pkts; + u64 rx_64_oct_pkts; + u64 rx_65_127_oct_pkts; + u64 rx_128_255_oct_pkts; + u64 rx_256_511_oct_pkts; + u64 rx_512_1023_oct_pkts; + u64 rx_1024_1518_oct_pkts; + u64 rx_1519_2047_oct_pkts; + u64 rx_2048_4095_oct_pkts; + u64 rx_4096_8191_oct_pkts; + u64 rx_8192_9216_oct_pkts; + u64 rx_9217_12287_oct_pkts; + u64 rx_12288_16383_oct_pkts; + u64 rx_1519_max_bad_pkts; + u64 rx_1519_max_good_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_bad_pkts; + u64 rx_bad_octs; + u64 rx_good_pkts; + u64 rx_good_octs; + u64 rx_total_pkts; + u64 rx_total_octs; + u64 rx_uni_pkts; + u64 rx_multi_pkts; + u64 rx_broad_pkts; + u64 rx_pauses; + u64 rx_pfc_pkts; + u64 rx_pfc_pri0_pkts; + u64 rx_pfc_pri1_pkts; + u64 rx_pfc_pri2_pkts; + u64 rx_pfc_pri3_pkts; + u64 rx_pfc_pri4_pkts; + u64 rx_pfc_pri5_pkts; + u64 rx_pfc_pri6_pkts; + u64 rx_pfc_pri7_pkts; + u64 rx_control_pkts; + u64 rx_sym_err_pkts; + u64 rx_fcs_err_pkts; + u64 rx_send_app_good_pkts; + u64 rx_send_app_bad_pkts; + u64 rx_unfilter_pkts; +}; + +struct sss_nic_mbx_mag_port_stats_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct sss_nic_mbx_mag_port_stats { + struct sss_mgmt_msg_head head; + + struct sss_nic_mag_port_stats counter; + u64 rsvd1[15]; +}; + +struct sss_nic_mag_cfg { + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + u8 pause_set; + u8 rsvd1[3]; + struct sss_nic_pause_cfg nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd2[2]; + + struct sss_nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; + + struct sss_nic_cache_port_sfp rt_cmd; + struct mutex sfp_mutex; /* mutex used for copy sfp info */ +}; 
+
+#define SSSNIC_PF_LIMIT_BW_MAX 100
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h
new file mode 100644
index 00000000000000..adfb3eae339618
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#ifndef SSS_NIC_CFG_RSS_DEFINE_H
+#define SSS_NIC_CFG_RSS_DEFINE_H
+
+#include
+
+/* rss */
+#define SSSNIC_RSS_TYPE_VALID_SHIFT 23
+#define SSSNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define SSSNIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define SSSNIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define SSSNIC_RSS_TYPE_IPV6_SHIFT 27
+#define SSSNIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define SSSNIC_RSS_TYPE_IPV4_SHIFT 29
+#define SSSNIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define SSSNIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define SSSNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SSSNIC_RSS_TYPE_##member##_SHIFT)
+#define SSSNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SSSNIC_RSS_TYPE_##member##_SHIFT) & 0x1)
+
+#define SSSNIC_RSS_KEY_RSV_NUM 2
+
+#define SSSNIC_RSS_INDIR_SIZE 256
+#define SSSNIC_RSS_KEY_SIZE 40
+
+enum sss_nic_rss_hash_engine_type {
+	SSSNIC_RSS_ENGINE_XOR = 0,
+	SSSNIC_RSS_ENGINE_TOEP,
+	SSSNIC_RSS_ENGINE_MAX,
+};
+
+struct sss_nic_rss_type {
+	u8 tcp_ipv6_ext;
+	u8 ipv6_ext;
+	u8 tcp_ipv6;
+	u8 ipv6;
+	u8 tcp_ipv4;
+	u8 ipv4;
+	u8 udp_ipv6;
+	u8 udp_ipv4;
+};
+
+/* rss */
+struct sss_nic_rss_indirect_table {
+	u32 rsvd[4];	/* make sure there are 16 bytes in front of entry[] */
+	u16 entry[SSSNIC_RSS_INDIR_SIZE];
+};
+
+struct sss_nic_rss_ctx_table {
+	u32 rsvd[4];
+	u32 ctx;
+};
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h
new file mode 100644
index 00000000000000..b9aaa38104a005
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#ifndef SSS_NIC_CFG_VF_DEFINE_H
+#define SSS_NIC_CFG_VF_DEFINE_H
+
+#include
+
+#define SSSNIC_OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define SSSNIC_HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
+
+#define SSSNIC_VLAN_PRIORITY_SHIFT 13
+
+#define SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF
+
+#define SSSNIC_GET_VLAN_PRIO(vlan, qos) \
+	((u16)((vlan) | ((qos) << SSSNIC_VLAN_PRIORITY_SHIFT)))
+
+struct sss_nic_vlan_ctx {
+	u32 func_id;
+	u32 qid;	/* if qid == 0xFFFF, configure all queues of the current function */
+	u32 tag;
+	u32 mode;
+	u32 sel;
+};
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h
new file mode 100644
index 00000000000000..3924d9f9b8eead
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#ifndef SSS_NIC_COMMON_H
+#define SSS_NIC_COMMON_H
+
+#include
+
+#include "sss_kernel.h"
+#include "sss_version.h"
+
+#define SSSNIC_DRV_NAME "sssnic"
+#define SSSNIC_DRV_VERSION SSS_VERSION_STR
+
+#define SSSNIC_FUNC_IS_VF(hwdev) (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF)
+
+#define SSSNIC_MODERATONE_DELAY HZ
+
+#define SSSNIC_LP_PKT_CNT 64
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h
b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h new file mode 100644 index 00000000000000..946928c7199de0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_DEFINE_H +#define SSS_NIC_DCB_DEFINE_H + +#include + +#define SSSNIC_PCP_UP_MAX 8 +#define SSSNIC_DSCP_MAC_UP 64 + +/* IEEE 802.1Qaz std */ +#define SSSNIC_DCB_COS_MAX 0x8 +#define SSSNIC_DCB_UP_MAX 0x8 +#define SSSNIC_DCB_TC_MAX 0x8 +#define SSSNIC_DCB_PG_MAX 0x8 +#define SSSNIC_DCB_TSA_SP 0x0 +#define SSSNIC_DCB_TSA_CBS 0x1 +#define SSSNIC_DCB_TSA_ETS 0x2 +#define SSSNIC_DCB_DSCP_NUM 0x8 +#define SSSNIC_DCB_IP_PRI_MAX 0x40 + +#define SSSNIC_DCB_PRIO_DWRR 0x0 +#define SSSNIC_DCB_PRIO_STRICT 0x1 + +#define SSSNIC_DCB_MAX_PFC_NUM 0x4 + +struct sss_nic_dcb_config { + u8 trust; /* pcp, dscp */ + u8 default_cos; + u8 pcp_user_cos_num; + u8 pcp_valid_cos_map; + u8 dscp_user_cos_num; + u8 dscp_valid_cos_map; + u8 pcp2cos[SSSNIC_PCP_UP_MAX]; + u8 dscp2cos[SSSNIC_DSCP_MAC_UP]; + + u8 cos_qp_offset[SSSNIC_DCB_COS_MAX]; + u8 cos_qp_num[SSSNIC_DCB_COS_MAX]; +}; + +struct sss_nic_dcb_info { + u8 dcb_on; + u8 default_cos; + u8 trust; + u8 rsvd1; + u8 pcp2cos[SSSNIC_DCB_UP_MAX]; + u8 dscp2cos[64]; + u32 rsvd2[7]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h new file mode 100644 index 00000000000000..adf6b92b961688 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DEV_DEFINE_H +#define SSS_NIC_DEV_DEFINE_H + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_nic_common.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tx_define.h" +#include "sss_nic_rx_define.h" +#include "sss_nic_irq_define.h" +#include "sss_nic_tcam_define.h" + +enum sss_nic_flags { + SSSNIC_INTF_UP, + SSSNIC_MAC_FILTER_CHANGED, + SSSNIC_LP_TEST, + SSSNIC_RSS_ENABLE, + SSSNIC_DCB_ENABLE, + SSSNIC_SAME_RXTX, + SSSNIC_INTR_ADAPT, + SSSNIC_UPDATE_MAC_FILTER, + SSSNIC_CHANGE_RES_INVALID, + SSSNIC_RSS_DEFAULT_INDIR, + SSSNIC_FORCE_LINK_UP, + SSSNIC_BONDING_MASTER, + SSSNIC_AUTONEG_RESET, + SSSNIC_RXQ_RECOVERY, +}; + +enum sss_nic_event_flags { + SSSNIC_EVENT_TX_TIMEOUT, +}; + +struct sss_nic_tx_stats { + u64 tx_timeout; + + /* Subdivision statistics show in private tool */ + u64 tx_drop; + u64 tx_invalid_qid; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_qp_resource { + u16 qp_num; + u8 cos_num; + u8 rsvd1; + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_sq_resource *sq_res_group; + struct sss_nic_rq_resource *rq_res_group; + struct sss_nic_irq_cfg *irq_cfg; +}; + +struct sss_nic_rx_rule { + struct list_head rule_list; + int rule_cnt; +}; + +struct sss_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct sss_hal_dev *uld_dev; + void *hwdev; + void *dev_hdl; + struct sss_nic_io *nic_io; + + int poll_budget; + + u32 msg_enable; + + unsigned long flags; + unsigned long event_flag; + unsigned long dcb_flags; 
+ unsigned long rx_mode; + + u32 rx_poll_wqe; + + u32 rx_dma_buff_size; + u16 rx_buff_len; + + u16 max_qp_num; + + u32 page_order; + + /* Rss related varibles */ + u8 rss_hash_engine; + u8 rsvd1[3]; + u8 *rss_key; + u32 *rss_key_big; /* hkey in big endian */ + u32 *rss_indir_tbl; + struct sss_nic_rss_type rss_type; + + u8 max_cos_num; + u8 dft_func_cos_bitmap; + u16 dft_port_cos_bitmap; + + int disable_port_cnt; + + unsigned long last_jiffies; + + u32 use_adaptive_rx_coalesce; + u32 rsvd2; + + struct sss_nic_intr_coal_info *coal_info; + struct workqueue_struct *workq; + + int netdev_uc_cnt; + int netdev_mc_cnt; + + int loop_test_rx_cnt; + int loop_pkt_len; + u8 *loop_test_rx_buf; + + struct sss_irq_desc *irq_desc_group; + u16 irq_desc_num; + + u8 link_status; + + u8 rsvd3; + + u32 get_rq_fail_cnt; + + struct sss_nic_tx_stats tx_stats; + + struct sss_nic_sq_desc *sq_desc_group; + struct sss_nic_rq_desc *rq_desc_group; + + struct sss_nic_qp_resource qp_res; + + struct delayed_work routine_work; + struct delayed_work rq_watchdog_work; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + + unsigned long *vlan_bitmap; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + /* lock for qp_res,qp_info access */ + struct mutex qp_mutex; + struct semaphore port_sem; + + struct work_struct rx_mode_work; + + struct delayed_work moderation_task; + + struct sss_nic_dcb_config hw_dcb_cfg; + struct sss_nic_dcb_config backup_dcb_cfg; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct sss_nic_tcam_info tcam_info; + struct sss_nic_rx_rule rx_rule; + + struct sss_nic_service_cap nic_svc_cap; + +}; + +#define SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, flag) \ + test_bit(flag, &(nic_dev)->flags) +#define SSSNIC_SET_NIC_DEV_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->flags) +#define SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_set_bit(flag, &(nic_dev)->flags) + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_IS_XDP_ENABLE(nic_dev) (!!(nic_dev)->xdp_prog) +#endif + +#define SSS_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(SSSNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) + +#define SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) +#define SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) (SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) +#define SSSNIC_VLAN_NUM_BITMAP(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev)) +#define SSSNIC_VID_LINE(nic_dev, vid) ((vid) / SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VID_COL(nic_dev, vid) ((vid) & (SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) - 1)) +#define SSSNIC_TEST_VLAN_BIT(nic_dev, vid) \ + ((nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)] & \ + (1UL << SSSNIC_VID_COL(nic_dev, vid))) + +#define SSSNIC_SET_VLAN_BITMAP(nic_dev, vid) \ + set_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) +#define SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vid) \ + clear_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) + +#define SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, flag) \ + 
test_and_clear_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_timeout++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_DROP_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_drop++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_invalid_qid++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define sssnic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level(&(nic_dev)->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define sss_nic_info(nic_dev, msglvl, format, arg...) \ + sssnic_msg(info, nic_dev, msglvl, format, ## arg) + +#define sss_nic_warn(nic_dev, msglvl, format, arg...) \ + sssnic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define sss_nic_err(nic_dev, msglvl, format, arg...) \ + sssnic_msg(err, nic_dev, msglvl, format, ## arg) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h new file mode 100644 index 00000000000000..32eccbe831b1ad --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IO_DEFINE_H +#define SSS_NIC_IO_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_wq.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_cfg_mag_define.h" + +struct sss_nic_vf_info { + u8 user_mac[ETH_ALEN]; + u8 drv_mac[ETH_ALEN]; + u16 qp_num; + u16 pf_vlan; + + u8 pf_qos; + u8 rsvd0[3]; + u32 extra_feature; + + u32 min_rate; + u32 max_rate; + + u8 specified_mac; + u8 attach; + u8 trust; + u8 spoofchk; + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 rsvd1[2]; +}; + +struct sss_nic_io_queue { + struct sss_wq wq; + union { + u8 wqe_type; /* for rq */ + u8 owner; /* for sq */ + }; + u8 rsvd1; + u16 rsvd2; + + u16 qid; + u16 msix_id; + + u8 __iomem *db_addr; + + union { + struct { + void *ci_addr; + } tx; + + struct { + u16 *pi_vaddr; + dma_addr_t pi_daddr; + } rx; + }; +} ____cacheline_aligned; + +struct sss_nic_io { + void *hwdev; + void *pcidev_hdl; + void *dev_hdl; + void *nic_dev; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; + + u16 active_qp_num; + u16 max_qp_num; + + u8 link_status; + u8 rsvd1[3]; + + void *ci_base_vaddr; + dma_addr_t ci_base_daddr; + + u8 __iomem *sq_db_addr; + u8 __iomem *rq_db_addr; + + u16 rx_buff_len; + u16 max_vf_num; + + struct sss_nic_vf_info *vf_info_group; + + u64 feature_cap; + + struct sss_nic_dcb_info dcb_info; + + struct sss_nic_mag_cfg mag_cfg; +}; + +struct sss_nic_qp_info { + u16 qp_num; + u8 resvd[6]; + + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; +}; + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h new file mode 100644 index 00000000000000..b6c44d40a22d22 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_DEFINE_H +#define SSS_NIC_IRQ_DEFINE_H + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_irq_cfg { + struct net_device *netdev; + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd1; + u32 irq_id; /* OS IRQ id */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + void *sq; + void *rq; +}; + +struct sss_nic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer; + u8 resend_timer; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h new file mode 100644 index 00000000000000..9da431372bbf3f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_QP_DEFINE_H +#define SSS_NIC_QP_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_cqe { + u32 state; + u32 vlan_len; + + u32 offload_type; + u32 hash; + u32 xid; + u32 decrypt_desc; + u32 rsvd6; + u32 pkt_desc; +}; + +struct sss_nic_normal_rqe { + u32 bd_hi_addr; + u32 bd_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct sss_nic_sge_section { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_nic_extend_rqe { + struct sss_nic_sge_section bd_sect; + struct sss_nic_sge_section cqe_sect; +}; + +struct sss_nic_rqe { + union { + struct sss_nic_normal_rqe normal_rqe; + struct sss_nic_extend_rqe extend_rqe; + }; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h new file mode 100644 index 00000000000000..1ecd5d6409c9d5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_DEFINE_H +#define SSS_NIC_RX_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_qp_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_irq_define.h" + +struct sss_nic_rq_stats { + u64 rx_packets; + u64 rx_bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 rx_dropped; + u64 xdp_dropped; + u64 rx_buf_errors; + + u64 alloc_rx_dma_err; + u64 alloc_skb_err; + u64 reset_drop_sge; + u64 large_xdp_pkts; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_rx_desc { + dma_addr_t buf_daddr; + dma_addr_t cqe_daddr; + struct sss_nic_rqe *rqe; + struct sss_nic_cqe *cqe; + struct page *page; + u32 page_offset; +}; + +struct sss_nic_rq_desc { + struct net_device *netdev; + struct device *dev; /* device for DMA mapping */ + + u32 irq_id; + u16 msix_id; + + u16 qid; + u32 qid_mask; + u32 q_depth; + + u32 buff_size_shift; + u32 
dma_buff_size; + u16 buf_len; + u16 rsvd; + + u16 backup_pi; + u16 pi; + u32 last_sw_pi; + u32 last_sw_ci; + u32 last_hw_ci; + u32 ci; + u16 reset_pi; + u16 reset_wqe_num; + u32 delta; + + u64 last_rx_bytes; + u64 last_rx_pkts; + u64 rx_pkts; + + unsigned long status; + + u8 last_pending_limt; + u8 last_coal_timer; + + u8 print_err_cnt; + u8 check_err_cnt; + + struct sss_nic_irq_cfg *irq_cfg; + + struct sss_nic_rq_stats stats; + + struct sss_nic_rx_desc *rx_desc_group; + struct sss_nic_io_queue *rq; + +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + void *cqe_vaddr; + dma_addr_t cqe_paddr; +} ____cacheline_aligned; + +struct sss_nic_rq_resource { + u16 page_num; + u8 rsvd[6]; + struct sss_nic_rx_desc *rx_desc_group; + void *cqe_vaddr; + dma_addr_t cqe_paddr; +}; + +#define SSSNIC_RQ_STATS_INC(rq_desc, field) \ +do { \ + u64_stats_update_begin(&(rq_desc)->stats.stats_sync); \ + (rq_desc)->stats.field++; \ + u64_stats_update_end(&(rq_desc)->stats.stats_sync); \ +} while (0) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h new file mode 100644 index 00000000000000..0a6dec9e9dc074 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TCAM_DEFINE_H +#define SSS_NIC_TCAM_DEFINE_H + +#include +#include + +#include "sss_kernel.h" + +#define SSSNIC_TCAM_BLOCK_SIZE 16 +#define SSSNIC_TCAM_FILTERS_MAX 512 + +#define SSSNIC_PKT_TCAM_INDEX_START(block_index) \ + (SSSNIC_TCAM_BLOCK_SIZE * (block_index)) + +#define SSSNIC_TCAM_FLOW_KEY_SIZE (44) + +#define SSSNIC_TCAM_RULE_FDIR_TYPE 0 +#define SSSNIC_TCAM_RULE_PPA_TYPE 1 + +#define SSSNIC_TCAM_BLOCK_ENABLE 1 +#define SSSNIC_TCAM_BLOCK_DISABLE 0 +#define SSSNIC_TCAM_RULES_NUM_MAX 4096 + +/* tcam block type, according to tcam block size */ +enum { + SSSNIC_TCAM_BLOCK_TYPE_LARGE = 0, /* block_size: 16 */ + SSSNIC_TCAM_BLOCK_TYPE_SMALL, /* block_size: 0 */ + SSSNIC_TCAM_BLOCK_TYPE_MAX +}; + +struct sss_nic_ipv4_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv4_h : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 dipv4_h : 16; + u32 sipv4_l : 16; + u32 rsvd2 : 16; + u32 dipv4_l : 16; + u32 rsvd3; + u32 dport : 16; + u32 rsvd4 : 16; + u32 rsvd5 : 16; + u32 sport : 16; + u32 outer_sipv4_h : 16; + u32 rsvd6 : 16; + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + u32 rsvd7 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 sipv6_key2 : 16; + u32 sipv6_key1 : 16; + u32 sipv6_key4 : 16; + u32 sipv6_key3 : 16; + u32 sipv6_key6 : 16; + u32 sipv6_key5 : 16; + u32 dport : 16; + u32 sipv6_key7 : 16; + u32 dipv6_key0 : 16; + u32 sport : 16; + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + u32 dipv6_key6 : 16; + u32 dipv6_key5 : 16; + u32 rsvd2 : 16; + u32 dipv6_key7 : 16; +}; + +struct sss_nic_vxlan_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + + u32 dipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + + u32 dipv6_key6 : 16; + u32 
dipv6_key5 : 16; + + u32 dport : 16; + u32 dipv6_key7 : 16; + + u32 rsvd2 : 16; + u32 sport : 16; + + u32 outer_sipv4_h : 16; + u32 rsvd3 : 16; + + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + + u32 rsvd4 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_tcam_key_tag { + union { + struct sss_nic_ipv4_tcam_key key_info_ipv4; + struct sss_nic_ipv6_tcam_key key_info_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_info_vxlan_ipv6; + }; + + union { + struct sss_nic_ipv4_tcam_key key_mask_ipv4; + struct sss_nic_ipv6_tcam_key key_mask_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_mask_vxlan_ipv6; + }; +}; + +struct sss_nic_tcam_node { + struct list_head block_list; + u16 block_id; + u16 index_cnt; + u8 index_used[SSSNIC_TCAM_BLOCK_SIZE]; +}; + +struct sss_nic_tcam_node_list { + struct list_head tcam_node_list; + u16 block_cnt; +}; + +struct sss_nic_tcam_filter { + struct list_head tcam_filter_list; + u16 block_id; + u16 index; + struct sss_nic_tcam_key_tag tcam_key; + u16 qid; +}; + +/* function level struct info */ +struct sss_nic_tcam_info { + u16 tcam_rule_num; + struct list_head tcam_list; + struct sss_nic_tcam_node_list tcam_node_info; +}; + +struct sss_nic_tcam_result { + u32 qid; + u32 rsvd; +}; + +struct sss_nic_tcam_key { + u8 key_x[SSSNIC_TCAM_FLOW_KEY_SIZE]; + u8 key_y[SSSNIC_TCAM_FLOW_KEY_SIZE]; +}; + +struct sss_nic_tcam_rule_cfg { + u32 index; + struct sss_nic_tcam_result data; + struct sss_nic_tcam_key key; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h new file mode 100644 index 00000000000000..b6076c87121aa0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_DEFINE_H +#define SSS_NIC_TX_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_nic_io_define.h" + +struct sss_nic_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct sss_nic_tx_desc { + struct sk_buff *skb; + + u16 wqebb_cnt; + u16 nr_frags; + + int sge_num; + u16 nr_pkt_cnt; + u16 rsvd1; + u32 rsvd2; + + u64 bytes; + struct sss_nic_dma_info *dma_group; + u64 rsvd3; +}; + +struct sss_nic_sq_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_busy; + u64 wake; + u64 tx_dropped; + + /* Subdivision statistics show in private tool */ + u64 skb_pad_err; + u64 offload_err; + u64 dma_map_err; + u64 unknown_tunnel_proto; + u64 frag_size_zero; + u64 frag_len_overflow; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_sq_desc { + struct net_device *netdev; + struct device *dev; + + struct sss_nic_sq_stats stats; + + u8 cos; + u8 rsvd1; + u16 qid; + u32 qid_mask; + u32 q_depth; + u32 rsvd2; + + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_io_queue *sq; + + u64 last_tx_pkts; + u64 last_tx_bytes; + u64 rsvd3; +} ____cacheline_aligned; + +struct sss_nic_sq_resource { + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_dma_info *dma_group; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c new file mode 100644 index 00000000000000..663403ff6da453 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c @@ -0,0 +1,1140 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEFAULT_MAX_MTU 0x3FFF +#define SSSNIC_VLAN_ID_MASK 0x7FFF + +#define SSSNIC_INIT_FUNC_MASK \ + (BIT(SSSNIC_FUNC_CFG_TYPE_INIT) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_MTU) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE)) + +#define SSSNIC_MGMT_STATUS_EXIST 0x6 + +#define SSSNIC_CHECK_IPSU_15BIT 0x8000 + +#define SSSNIC_DCB_PCP 0 +#define SSSNIC_DCB_DSCP 1 + +#define SSSNIC_F_ALL_MASK 0x3FFFF /* enable all feature */ +#define SSSNIC_DRV_DEFAULT_FEATURE SSSNIC_F_ALL_MASK + +#define SSSNIC_UNSUPPORT_SET_PAUSE 0x10 + +#define SSSNIC_VF_SET_MAC_ALREADY(func_type, status) \ + ((func_type) == SSS_FUNC_TYPE_VF && (status) == SSSNIC_PF_SET_VF_ALREADY) + +static int sss_nic_check_mac_set_status(u32 func_type, u8 status, u16 vlan_id) +{ + if (status != 0 && status != SSSNIC_MGMT_STATUS_EXIST) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + if ((vlan_id & SSSNIC_CHECK_IPSU_15BIT) != 0 && status == SSSNIC_MGMT_STATUS_EXIST) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN ID: %d\n", (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.vlan_id = vlan_id; + cmd_mac.func_id = func_id; + ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAC, + &cmd_mac, sizeof(cmd_mac), + &cmd_mac, &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac.head.state, cmd_mac.vlan_id) != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, state: 0x%x, channel: 0x%x\n", + cmd_mac.head.state, channel); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF mac, ignore it\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) { + nic_warn(nic_dev->dev_hdl, "Repeat mac, ignore it\n"); + return 0; + } + + return 0; +} + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.func_id = func_id; + cmd_mac.vlan_id = vlan_id; + 
ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, + &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to del MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, "PF has already set VF mac\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to delete MAC, ret: %d, state: 0x%x, channel: 0x%x\n", + ret, cmd_mac.head.state, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac) +{ + int ret; + u32 func_type; + struct sss_nic_mbx_mac_update cmd_mac_update = {0}; + u16 out_len = sizeof(cmd_mac_update); + + ether_addr_copy(cmd_mac_update.new_mac, new_mac); + ether_addr_copy(cmd_mac_update.old_mac.mac, nic_dev->netdev->dev_addr); + cmd_mac_update.old_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_UPDATE_MAC, + &cmd_mac_update, sizeof(cmd_mac_update), + &cmd_mac_update, &out_len); + + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, ret: %d, out_len: 0x%x\n", ret, out_len); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac_update.old_mac.head.state, + cmd_mac_update.old_mac.vlan_id)) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, state: 0x%x", cmd_mac_update.old_mac.head.state); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac_update.old_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF MAC. Ignore update\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac_update.old_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) + nic_warn(nic_dev->dev_hdl, + "MAC is existed. Ignore update\n"); + + return 0; +} + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + int ret; + + cmd_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mac)) { + nic_err(nic_dev->hwdev, + "Fail to get mac, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mac.head.state, out_len); + return -EINVAL; + } + + ether_addr_copy(mac_addr, cmd_mac.mac); + + return 0; +} + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id) +{ + struct sss_nic_mbx_vlan_cfg cmd_config_vlan = {0}; + u16 out_len = sizeof(cmd_config_vlan); + int ret; + + cmd_config_vlan.func_id = + sss_get_global_func_id(nic_dev->hwdev); + cmd_config_vlan.opcode = opcode; + cmd_config_vlan.vlan_id = vlan_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + &cmd_config_vlan, sizeof(cmd_config_vlan), + &cmd_config_vlan, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_config_vlan)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s vlan, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSSNIC_MBX_OPCODE_ADD ? 
"add" : "delete", + ret, cmd_config_vlan.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool enable, u16 channel) +{ + struct sss_nic_mbx_vport_state cmd_set_vport_state = {0}; + u16 out_len = sizeof(cmd_set_vport_state); + int ret; + + cmd_set_vport_state.func_id = func_id; + cmd_set_vport_state.state = enable ? 1 : 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + &cmd_set_vport_state, sizeof(cmd_set_vport_state), + &cmd_set_vport_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_vport_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vport state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_set_vport_state.head.state, out_len, channel); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sss_nic_set_hw_vport_state); + +int sss_nic_set_dcb_info(struct sss_nic_io *nic_io, + struct sss_nic_dcb_info *dcb_info) +{ + if (memcmp(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)) == 0) + return 0; + + memcpy(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)); + + /* notify stateful in pf, than notify all vf */ + sss_nic_notify_dcb_state_event(nic_io->hwdev, dcb_info); + + return 0; +} + +static int sss_nic_cfg_hw_pause(struct sss_nic_dev *nic_dev, + u8 opcode, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mbx_pause_cfg cmd_pause_cfg = {0}; + u16 out_len = sizeof(cmd_pause_cfg); + int ret; + + cmd_pause_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + cmd_pause_cfg.opcode = opcode; + if (opcode == SSSNIC_MBX_OPCODE_SET) { + cmd_pause_cfg.auto_neg = pause_cfg->auto_neg; + cmd_pause_cfg.rx_pause = pause_cfg->rx_pause; + cmd_pause_cfg.tx_pause = pause_cfg->tx_pause; + } + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + &cmd_pause_cfg, sizeof(cmd_pause_cfg), + &cmd_pause_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_pause_cfg)) { + if (cmd_pause_cfg.head.state == SSSNIC_UNSUPPORT_SET_PAUSE) { + ret = -EOPNOTSUPP; + nic_err(nic_dev->dev_hdl, "Fail to set pause when pfc is enable\n"); + } else { + ret = -EFAULT; + nic_err(nic_dev->dev_hdl, + "Fail to %s pause info, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSSNIC_MBX_OPCODE_SET ? 
"set" : "get", + ret, cmd_pause_cfg.head.state, out_len); + } + return ret; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) { + pause_cfg->auto_neg = cmd_pause_cfg.auto_neg; + pause_cfg->rx_pause = cmd_pause_cfg.rx_pause; + pause_cfg->tx_pause = cmd_pause_cfg.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, + struct sss_nic_pause_cfg pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + + down(&mag_cfg->cfg_lock); + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_SET, &pause_cfg); + if (ret != 0) { + up(&mag_cfg->cfg_lock); + return ret; + } + + mag_cfg->pfc_en = 0; + mag_cfg->pfc_bitmap = 0; + mag_cfg->pause_set = true; + mag_cfg->nic_pause.auto_neg = pause_cfg.auto_neg; + mag_cfg->nic_pause.rx_pause = pause_cfg.rx_pause; + mag_cfg->nic_pause.tx_pause = pause_cfg.tx_pause; + + up(&mag_cfg->cfg_lock); + + return 0; +} + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret = 0; + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_GET, pause_cfg); + if (ret != 0) + return ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + if (mag_cfg->pause_set || pause_cfg->auto_neg == SSSNIC_PORT_AN_NOT_SET) { + pause_cfg->rx_pause = mag_cfg->nic_pause.rx_pause; + pause_cfg->tx_pause = mag_cfg->nic_pause.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state) +{ + struct sss_nic_mbx_dcb_state cmd_dcb_state = {0}; + u16 out_len = sizeof(cmd_dcb_state); + int ret; + + cmd_dcb_state.state = state; + cmd_dcb_state.op_code = op_code; + cmd_dcb_state.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + &cmd_dcb_state, sizeof(cmd_dcb_state), + &cmd_dcb_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_dcb_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set dcb state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_dcb_state.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_clear_qp_resource qp_res = {0}; + u16 out_len = sizeof(qp_res); + int ret; + + if (!nic_dev) + return -EINVAL; + + qp_res.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + &qp_res, sizeof(qp_res), &qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &qp_res)) { + nic_err(nic_dev->dev_hdl, + "Fail to clear qp resource, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, qp_res.head.state, out_len); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(sss_nic_clear_hw_qp_resource); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_invalid_qp_cache cmd_qp_res = {0}; + u16 out_len = sizeof(cmd_qp_res); + int ret; + + cmd_qp_res.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + &cmd_qp_res, sizeof(cmd_qp_res), + &cmd_qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_qp_res)) { + nic_err(nic_io->dev_hdl, + "Fail to cache out qp resources, ret: %d, state: 0x%x, out len: 0x%x\n", + ret, cmd_qp_res.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, u16 func_id, + 
struct sss_nic_port_stats *stats) +{ + struct sss_nic_mbx_port_stats_info cmd_port_stats = {0}; + struct sss_nic_mbx_port_stats vport_stats = {0}; + u16 out_len = sizeof(vport_stats); + int ret; + + if (!nic_dev || !stats) + return -EINVAL; + + cmd_port_stats.func_id = func_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + &cmd_port_stats, sizeof(cmd_port_stats), + &vport_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &vport_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get vport statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, vport_stats.head.state, out_len); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +static int sss_nic_set_func_table(struct sss_nic_io *nic_io, + u32 cfg_mask, const struct sss_nic_func_table_cfg *cfg) +{ + struct sss_nic_mbx_set_func_table cmd_func_tbl = {0}; + u16 out_len = sizeof(cmd_func_tbl); + int ret; + + cmd_func_tbl.tbl_cfg = *cfg; + cmd_func_tbl.cfg_bitmap = cfg_mask; + cmd_func_tbl.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_SET_FUNC_TBL, + &cmd_func_tbl, sizeof(cmd_func_tbl), + &cmd_func_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_func_tbl)) { + nic_err(nic_io->dev_hdl, + "Fail to set func table, bitmap: 0x%x, ret: %d, state: 0x%x, out_len: 0x%x\n", + cfg_mask, ret, cmd_func_tbl.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_init_func_table(struct sss_nic_io *nic_io) +{ + struct sss_nic_func_table_cfg tbl_cfg = {0}; + + tbl_cfg.mtu = SSSNIC_DEFAULT_MAX_MTU; + tbl_cfg.rx_wqe_buf_size = nic_io->rx_buff_len; + + return sss_nic_set_func_table(nic_io, SSSNIC_INIT_FUNC_MASK, &tbl_cfg); +} + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu) +{ + struct sss_nic_func_table_cfg func_tbl_cfg = {0}; + + if (new_mtu < SSSNIC_MIN_MTU_SIZE || new_mtu > SSSNIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_dev->dev_hdl, + "Invalid mtu size: %ubytes, mtu range %ubytes - %ubytes.\n", + new_mtu, SSSNIC_MIN_MTU_SIZE, SSSNIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + func_tbl_cfg.mtu = new_mtu; + + return sss_nic_set_func_table(nic_dev->nic_io, + BIT(SSSNIC_FUNC_CFG_TYPE_MTU), &func_tbl_cfg); +} + +static int sss_nic_feature_nego(struct sss_nic_io *nic_io, u8 opcode, u64 *feature) +{ + struct sss_nic_mbx_feature_nego cmd_feature_nego = {0}; + u16 out_len = sizeof(cmd_feature_nego); + int ret; + + cmd_feature_nego.opcode = opcode; + cmd_feature_nego.func_id = sss_get_global_func_id(nic_io->hwdev); + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_feature_nego.feature, feature, sizeof(u64)); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_FEATURE_NEGO, + &cmd_feature_nego, sizeof(cmd_feature_nego), + &cmd_feature_nego, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature_nego)) { + nic_err(nic_io->dev_hdl, + "Fail to negotiate nic feature, ret:%d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_feature_nego.head.state, out_len); + return -EIO; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + memcpy(feature, cmd_feature_nego.feature, sizeof(u64)); + + return 0; +} + +static int sss_nic_get_bios_pf_bandwidth(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_bios_cfg cmd_bios_cfg = {0}; + u16 out_len = sizeof(cmd_bios_cfg); + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + !SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + 
cmd_bios_cfg.op_code = SSSNIC_NVM_PF_SPEED_LIMIT; + cmd_bios_cfg.bios_cfg.func_valid = SSSNIC_BIOS_FUN_VALID; + cmd_bios_cfg.bios_cfg.func_id = (u8)sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_BIOS_CFG, + &cmd_bios_cfg, sizeof(cmd_bios_cfg), + &cmd_bios_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bios_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to get bios pf bandwidth limit, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_bios_cfg.head.state, out_len); + return -EIO; + } + + if (cmd_bios_cfg.bios_cfg.pf_bw > SSSNIC_MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", + cmd_bios_cfg.bios_cfg.pf_bw); + return -EINVAL; + } + + if (cmd_bios_cfg.bios_cfg.signature != SSSNIC_BIOS_SIGNATURE) + nic_warn(nic_io->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n", + cmd_bios_cfg.bios_cfg.signature); + + nic_io->mag_cfg.pf_bw_limit = cmd_bios_cfg.bios_cfg.pf_bw; + + return 0; +} + +static int sss_nic_get_feature_from_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_GET, &nic_io->feature_cap); +} + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_SET, &nic_io->feature_cap); +} + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + nic_io->feature_cap = feature; + + nic_info(nic_io->dev_hdl, "Update nic feature to 0x%llx\n", nic_io->feature_cap); +} + +int sss_nic_io_init(struct sss_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct sss_nic_io *nic_io = NULL; + int ret; + + nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL); + if (!nic_io) + return -ENOMEM; + + nic_io->hwdev = nic_dev->hwdev; + nic_io->pcidev_hdl = pdev; + nic_io->dev_hdl = &pdev->dev; + nic_io->nic_dev = nic_dev; + mutex_init(&nic_io->mag_cfg.sfp_mutex); + sema_init(&nic_io->mag_cfg.cfg_lock, 1); + nic_io->rx_buff_len = nic_dev->rx_buff_len; + nic_dev->nic_io = nic_io; + + ret = sss_register_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register service adapter\n"); + goto register_adapter_err; + } + + ret = sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + true, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to set function svc used state\n"); + goto set_state_err; + } + + ret = sss_nic_init_func_table(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init function table\n"); + goto init_func_table_err; + } + + ret = sss_nic_get_feature_from_hw(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get nic features\n"); + goto get_feature_from_hw_err; + } + + ret = sss_nic_get_bios_pf_bandwidth(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get pf bandwidth limit\n"); + goto get_bios_pf_bandwidth_err; + } + + ret = sss_nic_init_pf_vf_info(nic_io); + if (ret != 0) + goto init_pf_vf_info_err; + + ret = sss_nic_register_io_callback(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init vf info\n"); + goto register_io_callback_err; + } + + nic_io->feature_cap &= SSSNIC_DRV_DEFAULT_FEATURE; + + return 0; + +register_io_callback_err: + sss_nic_deinit_pf_vf_info(nic_io); + +init_pf_vf_info_err: +get_bios_pf_bandwidth_err: +get_feature_from_hw_err: +init_func_table_err: + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + +set_state_err: + 
sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + +register_adapter_err: + nic_dev->nic_io = NULL; + kfree(nic_io); + + return ret; +} +EXPORT_SYMBOL(sss_nic_io_init); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + sss_nic_unregister_io_callback(nic_io); + + if (nic_io->vf_info_group) { + sss_nic_clear_all_vf_info(nic_io); + sss_nic_deinit_pf_vf_info(nic_io); + } + + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + + sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + + nic_dev->nic_io = NULL; + kfree(nic_io); +} +EXPORT_SYMBOL(sss_nic_io_deinit); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_force_drop_pkt cmd_force_drop_pkt = {0}; + u16 out_len = sizeof(cmd_force_drop_pkt); + int ret; + + cmd_force_drop_pkt.port = sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + &cmd_force_drop_pkt, sizeof(cmd_force_drop_pkt), + &cmd_force_drop_pkt, &out_len); + if ((cmd_force_drop_pkt.head.state != SSS_MGMT_CMD_UNSUPPORTED && + cmd_force_drop_pkt.head.state) || ret || !out_len) { + nic_err(nic_dev->dev_hdl, + "Fail to force drop tx packet, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_force_drop_pkt.head.state, out_len); + return -EFAULT; + } + + return cmd_force_drop_pkt.head.state; +} + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode) +{ + struct sss_nic_mbx_set_rx_mode cmd_set_rx_mode = {0}; + u16 out_len = sizeof(cmd_set_rx_mode); + int ret; + + cmd_set_rx_mode.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_rx_mode.rx_mode = rx_mode; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_MODE, + &cmd_set_rx_mode, sizeof(cmd_set_rx_mode), + &cmd_set_rx_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_rx_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_rx_mode.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_offload_vlan cmd_vlan_offload = {0}; + u16 out_len = sizeof(cmd_vlan_offload); + int ret; + + cmd_vlan_offload.vlan_offload = (u8)en; + cmd_vlan_offload.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + &cmd_vlan_offload, sizeof(cmd_vlan_offload), + &cmd_vlan_offload, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_vlan_offload)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx vlan offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_vlan_offload.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_io *nic_io = nic_dev->nic_io; + u16 func_id; + int ret; + + if (old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (!nic_io->vf_info_group || is_zero_ether_addr(vf_info->drv_mac)) + return 0; + + func_id = sss_get_glb_pf_vf_offset(nic_dev->hwdev) + (u16)vf_id; + + ret = sss_nic_del_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to delete 
VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, old_vlan); + return ret; + } + + ret = sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, new_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, new_vlan); + sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + return ret; + } + + return 0; +} + +static int sss_nic_set_rx_lro(struct sss_nic_dev *nic_dev, + bool lro_en, u8 lro_max_pkt_len) +{ + struct sss_nic_mbx_lro_cfg cmd_lro_cfg = {0}; + u16 out_len = sizeof(cmd_lro_cfg); + int ret; + + cmd_lro_cfg.lro_ipv4_en = (u8)lro_en; + cmd_lro_cfg.lro_ipv6_en = (u8)lro_en; + cmd_lro_cfg.lro_max_pkt_len = lro_max_pkt_len; + cmd_lro_cfg.opcode = SSSNIC_MBX_OPCODE_SET; + cmd_lro_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RX_LRO, + &cmd_lro_cfg, sizeof(cmd_lro_cfg), + &cmd_lro_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_set_rx_lro_timer(struct sss_nic_dev *nic_dev, u32 value) +{ + struct sss_nic_mbx_lro_timer cmd_lro_timer = {0}; + u16 out_len = sizeof(cmd_lro_timer); + int ret; + + cmd_lro_timer.timer = value; + cmd_lro_timer.opcode = SSSNIC_MBX_OPCODE_SET; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + &cmd_lro_timer, sizeof(cmd_lro_timer), + &cmd_lro_timer, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_timer)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro timer, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_timer.head.state, out_len); + + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len) +{ + int ret; + + nic_info(nic_dev->dev_hdl, "Set LRO max coalesce packet size to %uK\n", + max_pkt_len); + ret = sss_nic_set_rx_lro(nic_dev, en, (u8)max_pkt_len); + if (ret != 0) + return ret; + + /* we don't set LRO timer for VF */ + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_info(nic_dev->dev_hdl, "Success to set LRO timer to %u\n", timer); + + return sss_nic_set_rx_lro_timer(nic_dev, timer); +} + +int sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_vlan_filter_cfg cmd_set_filter = {0}; + u16 out_len = sizeof(cmd_set_filter); + int ret; + + cmd_set_filter.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_filter.vlan_filter_ctrl = (u32)en; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + &cmd_set_filter, sizeof(cmd_set_filter), + &cmd_set_filter, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_filter)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vlan filter, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_filter.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule) +{ + struct sss_nic_mbx_add_tcam_rule cmd_add_tcam_rule = {0}; + u16 out_len = sizeof(cmd_add_tcam_rule); + int ret; + + if (!nic_dev || !tcam_rule) + return -EINVAL; + + if (tcam_rule->index >= SSSNIC_TCAM_RULES_NUM_MAX) { + 
nic_err(nic_dev->dev_hdl, "Invalid tcam rules num :%u to add\n", + tcam_rule->index); + return -EINVAL; + } + + memcpy((void *)&cmd_add_tcam_rule.rule, (void *)tcam_rule, + sizeof(struct sss_nic_tcam_rule_cfg)); + cmd_add_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_add_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ADD_TC_FLOW, + &cmd_add_tcam_rule, sizeof(cmd_add_tcam_rule), + &cmd_add_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_add_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to add tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_add_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index) +{ + struct sss_nic_mbx_del_tcam_rule cmd_del_tcam_rule = {0}; + u16 out_len = sizeof(cmd_del_tcam_rule); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (index >= SSSNIC_TCAM_RULES_NUM_MAX) { + nic_err(nic_dev->dev_hdl, "Invalid tcam rule num :%u to del\n", index); + return -EINVAL; + } + + cmd_del_tcam_rule.index_start = index; + cmd_del_tcam_rule.index_num = 1; + cmd_del_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_del_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + &cmd_del_tcam_rule, sizeof(cmd_del_tcam_rule), + &cmd_del_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_del_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to delete tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_del_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_nic_mgmt_tcam_block(struct sss_nic_dev *nic_dev, u8 alloc_en, u16 *index) +{ + struct sss_nic_mbx_tcam_block_cfg cmd_mgmt_tcam_block = {0}; + u16 out_len = sizeof(cmd_mgmt_tcam_block); + int ret; + + if (!nic_dev || !index) + return -EINVAL; + + cmd_mgmt_tcam_block.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_mgmt_tcam_block.alloc_en = alloc_en; + cmd_mgmt_tcam_block.tcam_type = SSSNIC_TCAM_BLOCK_TYPE_LARGE; + cmd_mgmt_tcam_block.tcam_block_index = *index; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + &cmd_mgmt_tcam_block, sizeof(cmd_mgmt_tcam_block), + &cmd_mgmt_tcam_block, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mgmt_tcam_block)) { + nic_err(nic_dev->dev_hdl, + "Fail to set tcam block, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mgmt_tcam_block.head.state, out_len); + return -EIO; + } + + if (alloc_en) + *index = cmd_mgmt_tcam_block.tcam_block_index; + + return 0; +} + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_ENABLE, index); +} + +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_DISABLE, index); +} + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_mbx_set_tcam_state cmd_set_tcam_enable = {0}; + u16 out_len = sizeof(cmd_set_tcam_enable); + int ret; + + cmd_set_tcam_enable.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_tcam_enable.tcam_enable = (u8)enable; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ENABLE_TCAM, + &cmd_set_tcam_enable, sizeof(cmd_set_tcam_enable), + &cmd_set_tcam_enable, &out_len); + if 
(SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_tcam_enable)) { + nic_err(nic_dev->dev_hdl, + "Fail to set fdir tcam filter, ret: %d, state: 0x%x, out_len: 0x%x, enable: 0x%x\n", + ret, cmd_set_tcam_enable.head.state, out_len, + enable); + return -EIO; + } + + return 0; +} + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_flush_tcam_rule cmd_flush_tcam_rule = {0}; + u16 out_len = sizeof(cmd_flush_tcam_rule); + int ret; + + cmd_flush_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FLUSH_TCAM, + &cmd_flush_tcam_rule, + sizeof(cmd_flush_tcam_rule), + &cmd_flush_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_flush_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to flush tcam fdir rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_flush_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 qp_num, u16 wqe_type) +{ + int ret; + u16 i; + struct sss_nic_rq_pc_info *rq_pc_info = NULL; + struct sss_nic_rq_hw_info *rq_hw = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to alloc cmd_buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*rq_hw); + + rq_hw = msg_buf->buf; + rq_hw->num_queues = qp_num; + rq_hw->func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_cpu_to_be32(rq_hw, sizeof(*rq_hw)); + + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET, + msg_buf, msg_buf, NULL, 0, SSS_CHANNEL_NIC); + if (ret) + goto get_rq_info_error; + + rq_pc_info = msg_buf->buf; + for (i = 0; i < qp_num; i++) { + out_info[i].hw_ci = rq_pc_info[i].hw_ci >> wqe_type; + out_info[i].hw_pi = rq_pc_info[i].hw_pi >> wqe_type; + } + +get_rq_info_error: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed) +{ + int ret; + u32 pf_rate; + u32 speed_convert[SSSNIC_PORT_SPEED_UNKNOWN] = { + 0, 10, 100, 1000, 10000, 25000, 40000, 50000, 100000, 200000 + }; + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_mbx_tx_rate_cfg rate_cfg = {0}; + u16 out_len = sizeof(rate_cfg); + + if (speed >= SSSNIC_PORT_SPEED_UNKNOWN) { + nic_err(nic_io->dev_hdl, "Invalid speed level: %u\n", speed); + return -EINVAL; + } + + if (nic_io->mag_cfg.pf_bw_limit == SSSNIC_PF_LIMIT_BW_MAX) { + pf_rate = 0; + } else { + pf_rate = (speed_convert[speed] / 100) * nic_io->mag_cfg.pf_bw_limit; + if (pf_rate == 0 && speed != SSSNIC_PORT_SPEED_NOT_SET) + pf_rate = 1; + } + + rate_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + rate_cfg.max_rate = pf_rate; + rate_cfg.min_rate = 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rate_cfg)) { + nic_err(nic_dev->dev_hdl, "Fail to set rate:%u, ret: %d, state: 0x%x, out len: 0x%x\n", + pf_rate, ret, rate_cfg.head.state, out_len); + return rate_cfg.head.state ? 
rate_cfg.head.state : -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h new file mode 100644 index 00000000000000..20c0cf5991bee7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_H +#define SSS_NIC_CFG_H + +#include +#include + +#include "sss_nic_cfg_define.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_SUPPORT_FEATURE(nic_io, feature) \ + ((nic_io)->feature_cap & SSSNIC_F_##feature) +#define SSSNIC_SUPPORT_CSUM(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, CSUM) +#define SSSNIC_SUPPORT_SCTP_CRC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, SCTP_CRC) +#define SSSNIC_SUPPORT_TSO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, TSO) +#define SSSNIC_SUPPORT_UFO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, UFO) +#define SSSNIC_SUPPORT_LRO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, LRO) +#define SSSNIC_SUPPORT_RSS(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RSS) +#define SSSNIC_SUPPORT_RXVLAN_FILTER(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_FILTER) +#define SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_io) \ + (SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_STRIP) && \ + SSSNIC_SUPPORT_FEATURE(nic_io, TX_VLAN_INSERT)) +#define SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, VXLAN_OFFLOAD) +#define SSSNIC_SUPPORT_IPSEC_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, IPSEC_OFFLOAD) +#define SSSNIC_SUPPORT_FDIR(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, FDIR) +#define SSSNIC_SUPPORT_PROMISC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, PROMISC) +#define SSSNIC_SUPPORT_ALLMULTI(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, ALLMULTI) +#define SSSNIC_SUPPORT_VF_MAC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, VF_MAC) +#define SSSNIC_SUPPORT_RATE_LIMIT(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RATE_LIMIT) +#define SSSNIC_SUPPORT_RXQ_RECOVERY(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RXQ_RECOVERY) + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule); +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index); + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable); + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev); + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac); + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr); + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu); + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, + u16 func_id, struct sss_nic_port_stats *stats); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev); + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode); + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len); + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id); + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool enable, u16 channel); + +int 
sss_nic_set_dcb_info(struct sss_nic_io *nic_io, struct sss_nic_dcb_info *dcb_info); + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state); + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev); + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_config); + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg pause_config); + +int sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, + u16 old_vlan, u16 new_vlan, int vf_id); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io); + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io); + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature); + +int sss_nic_io_init(struct sss_nic_dev *nic_dev); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev); + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 num_qps, u16 wqe_type); +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c new file mode 100644 index 00000000000000..573cf72f3b3968 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_dcb.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_user_cos_num; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_user_cos_num; + return 0; +} + +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_valid_cos_map; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_valid_cos_map; + return 0; +} + +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num) +{ + u8 cur_cos_num = 0; + u8 cos_per_qp_num; + u8 qp_num; + u8 qp_offset; + u8 i; + u8 remain; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + u8 valid_cos_map; + + if (cos_num == 0) + return; + + cos_per_qp_num = (u8)(nic_dev->qp_res.qp_num / cos_num); + if (cos_per_qp_num == 0) + return; + + remain = nic_dev->qp_res.qp_num % cos_per_qp_num; + valid_cos_map = sss_nic_get_valid_cos_map(nic_dev); + + memset(dcb_config->cos_qp_num, 0, sizeof(dcb_config->cos_qp_num)); + memset(dcb_config->cos_qp_offset, 0, sizeof(dcb_config->cos_qp_offset)); + + for (i = 0; i < SSSNIC_PCP_UP_MAX; i++) { + if (BIT(i) & valid_cos_map) { + qp_num = cos_per_qp_num; + qp_offset = (u8)(cur_cos_num * cos_per_qp_num); + + if (cur_cos_num < remain) { + qp_offset += cur_cos_num; + qp_num++; + } else { + qp_offset += remain; + } + + valid_cos_map -= (u8)BIT(i); + cur_cos_num++; + + dcb_config->cos_qp_num[i] = qp_num; + dcb_config->cos_qp_offset[i] = qp_offset; + sss_nic_info(nic_dev, drv, "Qp info: cos %u, qp_offset=%u qp_num=%u\n", + i, qp_offset, qp_num); + } + } + + 
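/* keep the backup DCB config in sync with the newly computed qp-to-cos layout */ +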
memcpy(nic_dev->backup_dcb_cfg.cos_qp_num, dcb_config->cos_qp_num, + sizeof(dcb_config->cos_qp_num)); + memcpy(nic_dev->backup_dcb_cfg.cos_qp_offset, dcb_config->cos_qp_offset, + sizeof(dcb_config->cos_qp_offset)); +} + +static void sss_nic_set_sq_cos(struct sss_nic_dev *nic_dev, + u16 qid_start, u16 qid_end, u8 cos) +{ + u16 qid; + + for (qid = qid_start; qid < qid_end; qid++) + nic_dev->sq_desc_group[qid].cos = cos; +} + +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + u8 i; + u16 q_num; + u16 qid_start; + u16 qid_end; + + sss_nic_set_sq_cos(nic_dev, 0, nic_dev->qp_res.qp_num, + nic_dev->hw_dcb_cfg.default_cos); + + if (dcb_en == 0) + return; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + q_num = (u16)nic_dev->hw_dcb_cfg.cos_qp_num[i]; + if (q_num == 0) + continue; + + qid_start = (u16)nic_dev->hw_dcb_cfg.cos_qp_offset[i]; + qid_end = qid_start + q_num; + sss_nic_set_sq_cos(nic_dev, qid_start, qid_end, i); + sss_nic_info(nic_dev, drv, "Update tx db cos, qid_start=%u, qid_end=%u cos=%u\n", + qid_start, qid_end, i); + } +} + +static int sss_nic_init_tx_cos_info(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + dcb_info.default_cos = dcb_config->default_cos; + dcb_info.trust = dcb_config->trust; + memset(dcb_info.dscp2cos, dcb_config->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_config->default_cos, sizeof(dcb_info.pcp2cos)); + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + + return ret; +} + +static u8 sss_nic_get_cos_num(u8 cos_bitmap) +{ + u8 i; + u8 cos_count = 0; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + if (cos_bitmap & BIT(i)) + cos_count++; + + return cos_count; +} + +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config) +{ + struct sss_nic_dcb_config *hw_config = &nic_dev->hw_dcb_cfg; + + memcpy(hw_config, dcb_config, sizeof(*dcb_config)); +} + +static int sss_nic_init_dcb_cfg(struct sss_nic_dev *nic_dev, + struct sss_nic_dcb_config *dcb_config) +{ + u8 func_cos_bitmap; + u8 port_cos_bitmap; + int ret; + u8 i; + u8 j; + + ret = sss_get_cos_valid_bitmap(nic_dev->hwdev, &func_cos_bitmap, &port_cos_bitmap); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to get cos valid bitmap, ret: %d\n", ret); + return -EFAULT; + } + + nic_dev->max_cos_num = sss_nic_get_cos_num(func_cos_bitmap); + nic_dev->dft_port_cos_bitmap = port_cos_bitmap; + nic_dev->dft_func_cos_bitmap = func_cos_bitmap; + + dcb_config->dscp_user_cos_num = nic_dev->max_cos_num; + dcb_config->pcp_user_cos_num = nic_dev->max_cos_num; + dcb_config->dscp_valid_cos_map = func_cos_bitmap; + dcb_config->pcp_valid_cos_map = func_cos_bitmap; + dcb_config->trust = DCB_PCP; + dcb_config->default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + dcb_config->pcp2cos[i] = func_cos_bitmap & BIT(i) ? 
i : dcb_config->default_cos; + for (j = 0; j < SSSNIC_DCB_COS_MAX; j++) + dcb_config->dscp2cos[i * SSSNIC_DCB_DSCP_NUM + j] = dcb_config->pcp2cos[i]; + } + + return 0; +} + +static void sss_nic_reset_dcb_config(struct sss_nic_dev *nic_dev) +{ + memset(&nic_dev->hw_dcb_cfg, 0, sizeof(nic_dev->hw_dcb_cfg)); + sss_nic_init_dcb_cfg(nic_dev, &nic_dev->hw_dcb_cfg); + sss_nic_info(nic_dev, drv, "Success to reset bcb confg\n"); +} + +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev) +{ + int ret; + + ret = sss_nic_set_hw_dcb_state(nic_dev, SSSNIC_MBX_OPCODE_SET_DCB_STATE, + !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + return ret; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_nic_sync_dcb_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + else + sss_nic_reset_dcb_config(nic_dev); + + return 0; +} + +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + dcb_config->default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + return 0; + } + + ret = sss_nic_init_dcb_cfg(nic_dev, dcb_config); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to init dcb, ret: %d\n", ret); + return ret; + } + sss_nic_info(nic_dev, drv, "Support num cos %u, default cos %u\n", + nic_dev->max_cos_num, dcb_config->default_cos); + + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(nic_dev->hw_dcb_cfg)); + + ret = sss_nic_init_tx_cos_info(nic_dev); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set tx cos info, ret: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h new file mode 100644 index 00000000000000..00a649598f286e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_H +#define SSS_NIC_DCB_H + +#include "sss_kernel.h" +#include "sss_nic_dcb_define.h" + +enum SSSNIC_DCB_FLAGS { + SSSNIC_DCB_UP_COS_SETTING, + SSSNIC_DCB_TRAFFIC_STOPPED, +}; + +enum sss_nic_dcb_trust { + DCB_PCP, + DCB_DSCP, +}; + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev); +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev); +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev); +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev); +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en); +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num); +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c new file mode 100644 index 00000000000000..af76eb584ae31d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_api.h" +#include 
"sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ethtool.h" + +#define SSSNIC_MGMT_VERSION_MAX_LEN 32 + +#define SSSNIC_AUTONEG_RESET_TIMEOUT 100 +#define SSSNIC_AUTONEG_FINISH_TIMEOUT 200 + +static void sss_nic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[SSSNIC_MGMT_VERSION_MAX_LEN] = {0}; + int ret; + + strscpy(drvinfo->driver, SSSNIC_DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, SSSNIC_DRV_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->bus_info, pci_name(pdev), sizeof(drvinfo->bus_info)); + + ret = sss_get_mgmt_version(nic_dev->hwdev, mgmt_ver, + SSSNIC_MGMT_VERSION_MAX_LEN, + SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get fw version, ret: %d\n", ret); + return; + } + + ret = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%s", mgmt_ver); + if (ret < 0) + nicif_err(nic_dev, drv, netdev, "Fail to snprintf fw version\n"); +} + +static u32 sss_nic_get_msglevel(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void sss_nic_set_msglevel(struct net_device *netdev, u32 msg_enable) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + nicif_info(nic_dev, drv, netdev, "Success to change msg_enable from 0x%x to 0x%x\n", + nic_dev->msg_enable, msg_enable); + + nic_dev->msg_enable = msg_enable; +} + +static int sss_nic_nway_reset(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + int ret; + + while (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_AUTONEG_RESET)) + msleep(SSSNIC_AUTONEG_RESET_TIMEOUT); + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to get port info\n"); + ret = -EFAULT; + goto reset_err; + } + + if (port_info.autoneg_state != SSSNIC_PORT_CFG_AN_ON) { + nicif_err(nic_dev, drv, netdev, "Autonegotiation is not on, don't support to restart it\n"); + ret = -EOPNOTSUPP; + goto reset_err; + } + + ret = sss_nic_set_autoneg(nic_dev, false); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation off\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + + ret = sss_nic_set_autoneg(nic_dev, true); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation on\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + nicif_info(nic_dev, drv, netdev, "Success to restart autonegotiation\n"); + +reset_err: + clear_bit(SSSNIC_AUTONEG_RESET, &nic_dev->flags); + return ret; +} + +static void sss_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __maybe_unused *param, + struct netlink_ext_ack __maybe_unused *extack) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ringparam->tx_pending = nic_dev->sq_desc_group[0].q_depth; + ringparam->rx_pending = nic_dev->rq_desc_group[0].q_depth; + ringparam->tx_max_pending = SSSNIC_MAX_TX_QUEUE_DEPTH; + ringparam->rx_max_pending = SSSNIC_MAX_RX_QUEUE_DEPTH; +} + +static int sss_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __maybe_unused *param, + 
struct netlink_ext_ack __maybe_unused *extack) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_qp_resource qp_res = {0}; + u32 sq_depth; + u32 rq_depth; + int ret; + + ret = sss_nic_check_ringparam_valid(netdev, ringparam); + if (ret != 0) + return ret; + + sq_depth = (u32)(1U << (u16)ilog2(ringparam->tx_pending)); + rq_depth = (u32)(1U << (u16)ilog2(ringparam->rx_pending)); + if (sq_depth == nic_dev->qp_res.sq_depth && + rq_depth == nic_dev->qp_res.rq_depth) + return 0; /* nothing to do */ + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->qp_res.sq_depth, nic_dev->qp_res.rq_depth, + sq_depth, rq_depth); + + if (netif_running(netdev) == 0) { + sss_nic_update_qp_depth(nic_dev, sq_depth, rq_depth); + return 0; + } + + qp_res = nic_dev->qp_res; + qp_res.sq_depth = sq_depth; + qp_res.rq_depth = rq_depth; + qp_res.sq_res_group = NULL; + qp_res.rq_res_group = NULL; + qp_res.irq_cfg = NULL; + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &qp_res, + NULL, NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update channel settings\n"); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_mag_led_type led_type = SSSNIC_MAG_LED_TYPE_ALARM; + enum sss_nic_mag_led_mode led_mode; + int ret; + + if (state == ETHTOOL_ID_ACTIVE) { + led_mode = SSSNIC_MAG_LED_FORCE_BLINK_2HZ; + } else if (state == ETHTOOL_ID_INACTIVE) { + led_mode = SSSNIC_MAG_LED_DEFAULT; + } else { + nicif_err(nic_dev, drv, netdev, "Not support to set phys id, state:%d\n", state); + return -EOPNOTSUPP; + } + + ret = sss_nic_set_hw_led_state(nic_dev, led_type, led_mode); + if (ret != 0) + nicif_err(nic_dev, drv, netdev, "Fail to set led status, ret:%d, type:%d, mode:%d\n", + ret, led_type, led_mode); + + return ret; +} + +static void sss_nic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get pauseparam\n"); + } else { + pauseparam->autoneg = pause_config.auto_neg == SSSNIC_PORT_CFG_AN_ON ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + pauseparam->rx_pause = pause_config.rx_pause; + pauseparam->tx_pause = pause_config.tx_pause; + } +} + +static int sss_nic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + struct sss_nic_pause_cfg pause_config = {0}; + u32 auto_neg; + int ret; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get auto-negotiation state\n"); + return -EFAULT; + } + + auto_neg = port_info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + if (pauseparam->autoneg != auto_neg) { + nicif_err(nic_dev, drv, netdev, + "Use: ethtool -s autoneg to change autoneg\n"); + return -EOPNOTSUPP; + } + pause_config.auto_neg = pauseparam->autoneg == AUTONEG_ENABLE ? + SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF; + pause_config.rx_pause = (u8)pauseparam->rx_pause; + pause_config.tx_pause = (u8)pauseparam->tx_pause; + + ret = sss_nic_set_hw_pause_info(nic_dev, pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set pauseparam\n"); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set pauseparam option, rx: %s, tx: %s\n", + pauseparam->rx_pause ? "on" : "off", pauseparam->tx_pause ? "on" : "off"); + + return 0; +} + +static int sss_nic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + sss_nic_get_module_info_t handler[SSSNIC_MODULE_TYPE_MAX] = {NULL}; + u8 sfp_type = 0; + u8 sfp_type_ext = 0; + int ret; + + handler[SSSNIC_MODULE_TYPE_SFP] = sss_nic_module_type_sfp; + handler[SSSNIC_MODULE_TYPE_QSFP] = sss_nic_module_type_qsfp; + handler[SSSNIC_MODULE_TYPE_QSFP_PLUS] = sss_nic_module_type_qsfp_plus; + handler[SSSNIC_MODULE_TYPE_QSFP28] = sss_nic_module_type_qsfp28; + + ret = sss_nic_get_sfp_type(nic_dev, &sfp_type, &sfp_type_ext); + if (ret != 0) + return ret; + + if (sfp_type >= SSSNIC_MODULE_TYPE_MAX) { + nicif_warn(nic_dev, drv, netdev, + "Unknown optical module type: 0x%x\n", sfp_type); + return -EINVAL; + } + + if (!handler[sfp_type]) { + nicif_warn(nic_dev, drv, netdev, + "Unknown optical module type: 0x%x\n", sfp_type); + return -EINVAL; + } + + handler[sfp_type](modinfo, sfp_type_ext); + + return 0; +} + +static int sss_nic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE]; + u32 offset = ee->len + ee->offset; + u32 len = ee->len; + int ret; + + if (len == 0 || offset > SSSNIC_STD_SFP_INFO_MAX_SIZE) + return -EINVAL; + + memset(data, 0, len); + + ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, len); + if (ret != 0) + return ret; + + memcpy(data, sfp_data + ee->offset, len); + + return 0; +} + +static u32 sss_nic_get_priv_flags(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 ret_flag = 0; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + ret_flag |= SSSNIC_PRIV_FLAG_SYMM_RSS; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + ret_flag |= SSSNIC_PRIV_FLAG_LINK_UP; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + ret_flag |= SSSNIC_PRIV_FLAG_RQ_RECOVERY; + + return ret_flag; +} + +static int sss_nic_set_priv_flags(struct net_device *netdev, u32 flags) +{ + int ret; + + ret = 
sss_nic_set_symm_rss_flag(netdev, flags); + if (ret) + return ret; + + ret = sss_nic_set_rq_recovery_flag(netdev, flags); + if (ret) + return ret; + + return sss_nic_set_force_link_flag(netdev, flags); +} + +static void sss_nic_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, SSSNIC_LP_TEST_TYPE_MAX * sizeof(u64)); + sss_nic_loop_test(netdev, eth_test, data); +} + +static const struct ethtool_ops g_nic_ethtool_ops = { + .supported_coalesce_params = SSSNIC_SUPPORTED_COALESCE_PARAMS, + + .get_link_ksettings = sss_nic_get_link_ksettings, + .set_link_ksettings = sss_nic_set_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .nway_reset = sss_nic_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + .set_ringparam = sss_nic_set_ringparam, + .get_pauseparam = sss_nic_get_pauseparam, + .set_pauseparam = sss_nic_set_pauseparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .self_test = sss_nic_self_test, + + .set_phys_id = sss_nic_set_phys_id, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce = sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_module_info = sss_nic_get_module_info, + .get_module_eeprom = sss_nic_get_module_eeprom, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, +}; + +static const struct ethtool_ops g_nicvf_ethtool_ops = { + .supported_coalesce_params = SSSNIC_SUPPORTED_COALESCE_PARAMS, + + .get_link_ksettings = sss_nic_get_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + + .set_ringparam = sss_nic_set_ringparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce = sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, + +}; + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (!SSSNIC_FUNC_IS_VF(adapter->hwdev)) + netdev->ethtool_ops = &g_nic_ethtool_ops; + else + netdev->ethtool_ops = &g_nicvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h new file mode 100644 index 00000000000000..d27145371df193 --- 
/dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_H +#define SSS_NIC_ETHTOOL_H + +#include + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c new file mode 100644 index 00000000000000..5befd916f414c5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c @@ -0,0 +1,810 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_ethtool_api.h" + +#define SSSNIC_COALESCE_PENDING_LIMIT_UNIT 8 +#define SSSNIC_COALESCE_TIMER_CFG_UNIT 5 +#define SSSNIC_COALESCE_MAX_PENDING_LIMIT (255 * SSSNIC_COALESCE_PENDING_LIMIT_UNIT) +#define SSSNIC_COALESCE_MAX_TIMER_CFG (255 * SSSNIC_COALESCE_TIMER_CFG_UNIT) +#define SSSNIC_WAIT_PKTS_TO_RX_BUFFER 200 +#define SSSNIC_WAIT_CLEAR_LP_TEST 100 + +#define SSSNIC_CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if ((coal)->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %u\n", \ + #item, (unit), ((coal)->item - \ + (coal)->item % (unit))); \ +} while (0) + +#define SSSNIC_CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %u %s\n", \ + #item, (ori_val) * (unit), \ + ((coal)->item - (coal)->item % (unit)), \ + (obj_str)); \ +} while (0) + +#define SSSNIC_CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str)); \ +} while (0) + +#define SSSNIC_PORT_DOWN_ERR_ID 0 +#define SSSNIC_LP_DEF_TIME 5 /* seconds */ + +#define SSSNIC_TEST_TIME_MULTIPLE 5 + +#define SSSNIC_INTERNAL_LP_MODE 5 + +#define SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT 5000 + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth) +{ + u16 i; + + nic_dev->qp_res.sq_depth = sq_depth; + nic_dev->qp_res.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qp_num; i++) { + nic_dev->sq_desc_group[i].q_depth = sq_depth; + nic_dev->rq_desc_group[i].q_depth = rq_depth; + nic_dev->sq_desc_group[i].qid_mask = sq_depth - 1; + nic_dev->rq_desc_group[i].qid_mask = rq_depth - 1; + } +} + +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (ringparam->rx_mini_pending != 0 || ringparam->rx_jumbo_pending != 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport rx_mini_pending: %u, rx_jumbo_pending: %u\n", + ringparam->rx_mini_pending, ringparam->rx_jumbo_pending); + return -EINVAL; + } + + if (ringparam->tx_pending < SSSNIC_MIN_QUEUE_DEPTH || + ringparam->tx_pending > SSSNIC_MAX_TX_QUEUE_DEPTH || + ringparam->rx_pending < 
SSSNIC_MIN_QUEUE_DEPTH || + ringparam->rx_pending > SSSNIC_MAX_RX_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of range tx[%d-%d] rx[%d-%d]\n", + ringparam->tx_pending, ringparam->tx_pending, + ringparam->rx_pending, ringparam->rx_pending); + return -EINVAL; + } + + return 0; +} + +void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal, + struct sss_nic_intr_coal_info *nic_coal) +{ + ethtool_coal->rx_coalesce_usecs = nic_coal->coalesce_timer * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + ethtool_coal->tx_coalesce_usecs = ethtool_coal->rx_coalesce_usecs; + ethtool_coal->rx_coalesce_usecs_low = nic_coal->rx_usecs_low * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + ethtool_coal->rx_coalesce_usecs_high = nic_coal->rx_usecs_high * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + + ethtool_coal->rx_max_coalesced_frames = nic_coal->pending_limt * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + ethtool_coal->tx_max_coalesced_frames = + ethtool_coal->rx_max_coalesced_frames; + ethtool_coal->rx_max_coalesced_frames_low = + nic_coal->rx_pending_limt_low * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + ethtool_coal->rx_max_coalesced_frames_high = + nic_coal->rx_pending_limt_high * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + + ethtool_coal->pkt_rate_low = (u32)nic_coal->pkt_rate_low; + ethtool_coal->pkt_rate_high = (u32)nic_coal->pkt_rate_high; +} + +int sss_nic_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethtool_coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + + if (queue == SSSNIC_COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + intr_coal_info = &nic_dev->coal_info[0]; + } else { + if (queue >= nic_dev->qp_res.qp_num) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + intr_coal_info = &nic_dev->coal_info[queue]; + } + + sss_nic_intr_coal_to_ethtool_coal(ethtool_coal, intr_coal_info); + ethtool_coal->use_adaptive_rx_coalesce = + nic_dev->use_adaptive_rx_coalesce; + + return 0; +} + +int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, struct sss_nic_intr_coal_info *coal) +{ + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + struct sss_irq_cfg irq_cfg = {0}; + struct net_device *netdev = nic_dev->netdev; + int ret; + + intr_coal_info = &nic_dev->coal_info[qid]; + if (intr_coal_info->coalesce_timer != coal->coalesce_timer || + intr_coal_info->pending_limt != coal->pending_limt) + intr_coal_info->user_set_intr_coal_flag = 1; + + intr_coal_info->coalesce_timer = coal->coalesce_timer; + intr_coal_info->pending_limt = coal->pending_limt; + intr_coal_info->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal_info->rx_pending_limt_high = coal->rx_pending_limt_high; + intr_coal_info->pkt_rate_low = coal->pkt_rate_low; + intr_coal_info->pkt_rate_high = coal->pkt_rate_high; + intr_coal_info->rx_usecs_low = coal->rx_usecs_low; + intr_coal_info->rx_usecs_high = coal->rx_usecs_high; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP) || + qid >= nic_dev->qp_res.qp_num || + nic_dev->use_adaptive_rx_coalesce != 0) + return 0; + + irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id; + irq_cfg.lli_set = 0; + irq_cfg.coalesc_intr_set = 1; + irq_cfg.coalesc_timer = intr_coal_info->coalesce_timer; + irq_cfg.resend_timer = intr_coal_info->resend_timer; + irq_cfg.pending = intr_coal_info->pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = + intr_coal_info->coalesce_timer; + 
nic_dev->rq_desc_group[qid].last_pending_limt = intr_coal_info->pending_limt; + ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, + SSS_CHANNEL_NIC); + if (ret != 0) + nicif_warn(nic_dev, drv, netdev, + "Fail to set queue%u coalesce", qid); + + return ret; +} + +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->cmd & ~SSSNIC_SUPPORTED_COALESCE_PARAMS) { + nicif_err(nic_dev, drv, netdev, + "Only support to change rx/tx-usecs and rx/tx-frames\n"); + + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs_low / SSSNIC_COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u)\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / SSSNIC_COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / SSSNIC_COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: coalesced_frames_high(%u) must more than coalesced_frames_low(%u)\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, 
netdev, + "Coal param: pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_coalesce_check(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + int ret; + + ret = sss_nic_check_coal_param_support(netdev, coal); + if (ret != 0) + return ret; + + ret = sss_nic_check_coal_param_valid(netdev, coal); + if (ret != 0) + return ret; + + ret = sss_nic_check_coal_param_range(netdev, coal); + if (ret != 0) + return ret; + + return 0; +} + +int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev, + struct sss_nic_intr_coal_info *intr_coal_info, u16 queue) +{ + u16 i; + + if (queue < nic_dev->qp_res.qp_num) { + sss_nic_set_hw_intr_coal(nic_dev, queue, intr_coal_info); + return 0; + } else if (queue != SSSNIC_COALESCE_ALL_QUEUE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + + for (i = 0; i < nic_dev->max_qp_num; i++) + sss_nic_set_hw_intr_coal(nic_dev, i, intr_coal_info); + + return 0; +} + +void sss_nic_coalesce_align_check(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + char obj_str[32] = {0}; + + if (queue == SSSNIC_COALESCE_ALL_QUEUE) { + intr_coal_info = &nic_dev->coal_info[0]; + snprintf(obj_str, sizeof(obj_str), "for netdev"); + } else { + intr_coal_info = &nic_dev->coal_info[queue]; + snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); + } + + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->coalesce_timer, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_high, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->pending_limt, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_high, obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + intr_coal_info->pkt_rate_low, obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + intr_coal_info->pkt_rate_high, obj_str); +} + +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal) +{ + nic_coal->coalesce_timer 
= + (u8)(ethtool_coal->rx_coalesce_usecs / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->pending_limt = (u8)(ethtool_coal->rx_max_coalesced_frames / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->pkt_rate_low = ethtool_coal->pkt_rate_low; + nic_coal->pkt_rate_high = ethtool_coal->pkt_rate_high; + nic_coal->rx_usecs_low = + (u8)(ethtool_coal->rx_coalesce_usecs_low / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_usecs_high = + (u8)(ethtool_coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_pending_limt_low = + (u8)(ethtool_coal->rx_max_coalesced_frames_low / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->rx_pending_limt_high = + (u8)(ethtool_coal->rx_max_coalesced_frames_high / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info intr_coal_info = {0}; + u32 last_adaptive_rx; + int ret = 0; + + ret = sss_nic_coalesce_check(netdev, coal); + if (ret != 0) + return ret; + + sss_nic_coalesce_align_check(netdev, coal); + sss_nic_coalesce_change_check(netdev, coal, queue); + + sss_nic_ethtool_coalesce_to_intr_coal_info(&intr_coal_info, coal); + + last_adaptive_rx = nic_dev->use_adaptive_rx_coalesce; + nic_dev->use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + if (nic_dev->use_adaptive_rx_coalesce == 0 && + (intr_coal_info.coalesce_timer == 0 || + intr_coal_info.pending_limt == 0)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + if (SSS_CHANNEL_RES_VALID(nic_dev) != 0) { + if (nic_dev->use_adaptive_rx_coalesce == 0) + cancel_delayed_work_sync(&nic_dev->moderation_task); + else if (last_adaptive_rx == 0) + queue_delayed_work(nic_dev->workq, + &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + } + + return sss_nic_set_coal_param_to_hw(nic_dev, &intr_coal_info, queue); +} + +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; +} + +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; +} + +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext) +{ + if (sfp_type_ext < SSSNIC_SFP_TYPE_EXT_FLAG) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } +} + +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; +} + +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag & SSSNIC_PRIV_FLAG_RQ_RECOVERY) { + if (!SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) { + nicif_info(nic_dev, drv, netdev, "Unsupport open rq recovery\n"); + return -EOPNOTSUPP; + } + + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + nicif_info(nic_dev, drv, netdev, "Succss to open rq recovery\n"); + } else { + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + nicif_info(nic_dev, 
drv, netdev, "Success to close rq recovery\n"); + } + + return 0; +} + +int sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if ((flag & SSSNIC_PRIV_FLAG_SYMM_RSS) != 0) { + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while RSS is disabled\n"); + return -EOPNOTSUPP; + } + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } else { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } + + return 0; +} + +void sss_nic_force_link_up(struct sss_nic_dev *nic_dev) +{ + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return; + + if (netif_carrier_ok(nic_dev->netdev)) + return; + + nic_dev->link_status = true; + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Set link up\n"); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); +} + +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 link_status = 0; + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return 0; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return 0; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, link, nic_dev->netdev, "Fail to get link state: %d\n", ret); + return ret; + } + + nic_dev->link_status = link_status; + + if (link_status != 0) { + if (netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is up\n"); + } else { + if (!netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_off(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is down\n"); + } + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); + + return ret; +} + +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if ((flag & SSSNIC_PRIV_FLAG_LINK_UP) != 0) + sss_nic_force_link_up(nic_dev); + else + ret = sss_nic_force_link_down(nic_dev); + + return ret; +} + +static int sss_nic_finish_loop_test(struct sss_nic_dev *nic_dev, + struct sk_buff *skb_tmp, u32 test_time) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + u32 cnt = test_time * SSSNIC_TEST_TIME_MULTIPLE; + struct sk_buff *skb = NULL; + int lp_pkt_cnt = nic_dev->loop_pkt_len; + unsigned char pkt_mark_data; + u32 i; + u32 j; + + for (i = 0; i < cnt; i++) { + nic_dev->loop_test_rx_cnt = 0; + memset(loop_test_rx_buf, 0, SSSNIC_LP_PKT_CNT * lp_pkt_cnt); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + nicif_err(nic_dev, drv, netdev, + "Fail to copy skb for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[lp_pkt_cnt - 1] = j; + + if (sss_nic_loop_start_xmit(skb, netdev) != NETDEV_TX_OK) { + dev_kfree_skb_any(skb); + nicif_err(nic_dev, drv, netdev, + "Fail to xmit pkt for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkts received to RX buffer */ + 
msleep(SSSNIC_WAIT_PKTS_TO_RX_BUFFER); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + pkt_mark_data = *(loop_test_rx_buf + (j * lp_pkt_cnt) + (lp_pkt_cnt - 1)); + if (memcmp((loop_test_rx_buf + (j * lp_pkt_cnt)), + skb_tmp->data, (lp_pkt_cnt - 1)) != 0 || + pkt_mark_data != j) { + nicif_err(nic_dev, drv, netdev, + "Fail to compare pkt in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * SSSNIC_LP_PKT_CNT)), + (lp_pkt_cnt - 1), pkt_mark_data); + return -EIO; + } + } + } + + return 0; +} + +static struct sk_buff *sss_nic_alloc_loop_skb(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct sk_buff *skb = NULL; + struct ethhdr *eth_hdr = NULL; + u8 *test_data = NULL; + u32 i; + + skb = alloc_skb(nic_dev->loop_pkt_len, GFP_ATOMIC); + if (!skb) + return skb; + + eth_hdr = __skb_put(skb, ETH_HLEN); + eth_hdr->h_proto = htons(ETH_P_ARP); + ether_addr_copy(eth_hdr->h_dest, nic_dev->netdev->dev_addr); + eth_zero_addr(eth_hdr->h_source); + skb_reset_mac_header(skb); + + test_data = __skb_put(skb, nic_dev->loop_pkt_len - ETH_HLEN); + for (i = ETH_HLEN; i < nic_dev->loop_pkt_len; i++) + test_data[i] = i & 0xFF; + + skb->queue_mapping = 0; + skb->dev = netdev; + skb->protocol = htons(ETH_P_ARP); + + return skb; +} + +static int sss_nic_run_loop_test(struct sss_nic_dev *nic_dev, u32 test_time) +{ + struct net_device *netdev = nic_dev->netdev; + struct sk_buff *skb_tmp = NULL; + int ret; + + skb_tmp = sss_nic_alloc_loop_skb(nic_dev); + if (!skb_tmp) { + nicif_err(nic_dev, drv, netdev, + "Fail to create lp test skb for loopback test\n"); + return -ENOMEM; + } + + ret = sss_nic_finish_loop_test(nic_dev, skb_tmp, test_time); + if (ret != 0) { + dev_kfree_skb_any(skb_tmp); + return ret; + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Success to loopback test.\n"); + return 0; +} + +static int sss_nic_do_loop_test(struct sss_nic_dev *nic_dev, u32 *flags, + u32 test_time, enum sss_nic_lp_test_type *test_index) +{ + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + *test_index = SSSNIC_INTERNAL_LP_TEST; + if (sss_nic_set_loopback_mode(nic_dev, + SSSNIC_INTERNAL_LP_MODE, true)) { + nicif_err(nic_dev, drv, netdev, + "Fail to set port loopback mode before loopback test\n"); + return -EFAULT; + } + + /* suspend 5000 ms, waiting for port to stop receiving frames */ + msleep(SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT); + } else { + *test_index = SSSNIC_EXTERNAL_LP_TEST; + } + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST); + + if (sss_nic_run_loop_test(nic_dev, test_time)) + ret = -EFAULT; + + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST); + msleep(SSSNIC_WAIT_CLEAR_LP_TEST); + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (sss_nic_set_loopback_mode(nic_dev, + SSSNIC_INTERNAL_LP_MODE, false)) { + nicif_err(nic_dev, drv, netdev, + "Fail to cancel port loopback mode after loopback test\n"); + ret = -EFAULT; + } + } else { + *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + return ret; +} + +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_lp_test_type test_type = SSSNIC_INTERNAL_LP_TEST; + u32 act_test_time = SSSNIC_LP_DEF_TIME; + u8 link_state = 0; + int ret; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_err(nic_dev, drv, netdev, + "Fail to entry loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + 
data[SSSNIC_PORT_DOWN_ERR_ID] = 1; + return; + } + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ret = sss_nic_do_loop_test(nic_dev, ð_test->flags, act_test_time, &test_type); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_type] = 1; + } + + netif_tx_wake_all_queues(netdev); + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (!ret && link_state) + netif_carrier_on(netdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h new file mode 100644 index 00000000000000..9cfb72b2668d5f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_API_H +#define SSS_NIC_ETHTOOL_API_H + +#include + +#define SSSNIC_PRIV_FLAG_SYMM_RSS BIT(0) +#define SSSNIC_PRIV_FLAG_LINK_UP BIT(1) +#define SSSNIC_PRIV_FLAG_RQ_RECOVERY BIT(2) + +#define SSSNIC_COALESCE_ALL_QUEUE 0xFFFF + +#define SSSNIC_SFP_TYPE_EXT_FLAG 0x3 + +typedef void (*sss_nic_get_module_info_t)(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); + +enum sss_nic_lp_test_type { + SSSNIC_INTERNAL_LP_TEST = 0, + SSSNIC_EXTERNAL_LP_TEST = 1, + SSSNIC_LP_TEST_TYPE_MAX = 2, +}; + +enum module_type { + SSSNIC_MODULE_TYPE_SFP = 0x3, + SSSNIC_MODULE_TYPE_QSFP = 0x0C, + SSSNIC_MODULE_TYPE_QSFP_PLUS = 0x0D, + SSSNIC_MODULE_TYPE_QSFP28 = 0x11, + SSSNIC_MODULE_TYPE_MAX, +}; + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth); +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam); +void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal, + struct sss_nic_intr_coal_info *nic_coal); +int sss_nic_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethtool_coal, u16 queue); +int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, struct sss_nic_intr_coal_info *coal); +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_coalesce_check(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev, + struct sss_nic_intr_coal_info *intr_coal_info, u16 queue); +void sss_nic_coalesce_align_check(struct net_device *netdev, + struct ethtool_coalesce *coal); +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue); +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal); +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue); +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, + u32 flag); +int sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag); +void sss_nic_force_link_up(struct 
sss_nic_dev *nic_dev); +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev); +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag); +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c new file mode 100644 index 00000000000000..f52851b74fe5f8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_ethtool_stats_api.h" + +typedef int (*sss_nic_ss_handler_t)(struct sss_nic_dev *nic_dev); + +struct sss_nic_handler { + int type; + sss_nic_ss_handler_t handler_func; +}; + +typedef void (*sss_nic_strings_handler_t)(struct sss_nic_dev *nic_dev, + u8 *buffer); + +struct sss_nic_get_strings { + int type; + sss_nic_strings_handler_t handler_func; +}; + +int sss_nic_get_sset_count(struct net_device *netdev, int settings) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_handler handler[] = { + {ETH_SS_TEST, sss_nic_eth_ss_test}, + {ETH_SS_STATS, sss_nic_eth_ss_stats}, + {ETH_SS_PRIV_FLAGS, sss_nic_eth_ss_priv_flags}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if (settings == handler[i].type) + return handler[i].handler_func(nic_dev); + + return -EOPNOTSUPP; +} + +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u16 cnt; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + cnt = sss_nic_get_ethtool_dev_stats(nic_dev, data); + + cnt += sss_nic_get_ethtool_vport_stats(nic_dev, data + cnt); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + cnt += sss_nic_get_ethtool_port_stats(nic_dev, data + cnt); + + sss_nic_get_drv_queue_stats(nic_dev, data + cnt); +} + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_get_strings handler[] = { + {ETH_SS_TEST, sss_nic_get_test_strings}, + {ETH_SS_STATS, sss_nic_get_drv_stats_strings}, + {ETH_SS_PRIV_FLAGS, sss_nic_get_priv_flags_strings}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if (stringset == handler[i].type) + return handler[i].handler_func(nic_dev, buf); + + nicif_err(nic_dev, drv, netdev, "Invalid string set %u.", stringset); +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting) +{ + int ret; + struct sss_nic_cmd_link_settings cmd = {0}; + + sss_nic_ethtool_ksetting_clear(ksetting, supported); + sss_nic_ethtool_ksetting_clear(ksetting, advertising); + + ret = sss_nic_get_link_setting(net_dev, &cmd); + if (ret != 0) + return ret; + + sss_nic_copy_ksetting(ksetting, &cmd); + + return 0; +} +#endif +#endif + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_set_link_ksettings(struct net_device 
*netdev, + const struct ethtool_link_ksettings *ksettings) +{ + /* Only support to set autoneg and speed */ + return sssnic_set_link_settings(netdev, + ksettings->base.autoneg, ksettings->base.speed); +} +#endif +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h new file mode 100644 index 00000000000000..3e3d6e1aa8d638 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_H +#define SSS_NIC_ETHTOOL_STATS_H + +#include +#include + +#include "sss_kernel.h" + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf); +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +int sss_nic_get_sset_count(struct net_device *netdev, int settings); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting); +int sss_nic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ksettings); +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c new file mode 100644 index 00000000000000..1fc598755ba43f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats_api.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" + +#define SSSNIC_SET_SUPPORTED_MODE 0 +#define SSSNIC_SET_ADVERTISED_MODE 1 + +#define SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) +#define SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) + +#define SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(ecmd, mode, op) \ +do { \ + u32 _link_mode; \ + unsigned long *val = ((op) == SSSNIC_SET_SUPPORTED_MODE) ? 
\ + (ecmd)->supported : (ecmd)->advertising; \ + for (_link_mode = 0; _link_mode < g_link_mode_table[mode].array_len; _link_mode++) { \ + if (g_link_mode_table[mode].array[_link_mode] >= \ + __ETHTOOL_LINK_MODE_MASK_NBITS) \ + continue; \ + set_bit(g_link_mode_table[mode].array[_link_mode], val); \ + } \ +} while (0) + +#define SSSNIC_NETDEV_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct rtnl_link_stats64, _item), \ + .offset = offsetof(struct rtnl_link_stats64, _item) \ +} + +#define SSSNIC_TX_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_tx_stats, _item), \ + .offset = offsetof(struct sss_nic_tx_stats, _item) \ +} + +#define SSSNIC_RQ_STATS(_item) { \ + .name = "rxq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_rq_stats, _item), \ + .offset = offsetof(struct sss_nic_rq_stats, _item) \ +} + +#define SSSNIC_SQ_STATS(_item) { \ + .name = "txq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_sq_stats, _item), \ + .offset = offsetof(struct sss_nic_sq_stats, _item) \ +} + +#define SSSNIC_FUNCTION_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_port_stats, _item), \ + .offset = offsetof(struct sss_nic_port_stats, _item) \ +} + +#define SSSNIC_PORT_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_mag_port_stats, _item), \ + .offset = offsetof(struct sss_nic_mag_port_stats, _item) \ +} + +#define SSSNIC_GET_VALUE_OF_PTR(len, ptr) ( \ + (len) == sizeof(u64) ? *(u64 *)(ptr) : \ + (len) == sizeof(u32) ? *(u32 *)(ptr) : \ + (len) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define SSSNIC_DEV_STATS_PACK(items, item_idx, array, stats_ptr) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, SSS_TOOL_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + SSS_TOOL_SHOW_ITEM_LEN); \ + snprintf((items)[item_idx].name, SSS_TOOL_SHOW_ITEM_LEN, \ + (array)[j].name, (qid)); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_CONVERT_DATA_TYPE(len, p) (((len) == sizeof(u64)) ? *(u64 *)(p) : *(u32 *)(p)) +#define SSSNIC_AUTONEG_STRING(autoneg) ((autoneg) ? ("autong enable") : ("autong disable")) +#define SSSNIC_AUTONEG_ENABLE(autoneg) ((autoneg) ? 
SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF) + +#define SSSNIC_NEGATE_ZERO_U32 ((u32)~0) + +struct sss_nic_hw2ethtool_link_mode { + const u32 *array; + u32 array_len; + u32 speed; +}; + +typedef void (*sss_nic_port_type_handler_t)(struct sss_nic_cmd_link_settings *cmd); + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd); + +static char g_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +static char g_priv_flags_strings[][ETH_GSTRING_LEN] = { + "Symmetric-RSS", + "Force-Link-up", + "Rxq_Recovery", +}; + +static struct sss_nic_stats g_nic_sq_stats[] = { + SSSNIC_SQ_STATS(tx_packets), + SSSNIC_SQ_STATS(tx_bytes), + SSSNIC_SQ_STATS(tx_busy), + SSSNIC_SQ_STATS(wake), + SSSNIC_SQ_STATS(tx_dropped), +}; + +static struct sss_nic_stats g_nic_sq_stats_extern[] = { + SSSNIC_SQ_STATS(skb_pad_err), + SSSNIC_SQ_STATS(offload_err), + SSSNIC_SQ_STATS(dma_map_err), + SSSNIC_SQ_STATS(unknown_tunnel_proto), + SSSNIC_SQ_STATS(frag_size_zero), + SSSNIC_SQ_STATS(frag_len_overflow), + SSSNIC_SQ_STATS(rsvd1), + SSSNIC_SQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_nic_rq_stats[] = { + SSSNIC_RQ_STATS(rx_packets), + SSSNIC_RQ_STATS(rx_bytes), + SSSNIC_RQ_STATS(errors), + SSSNIC_RQ_STATS(csum_errors), + SSSNIC_RQ_STATS(other_errors), + SSSNIC_RQ_STATS(rx_dropped), +#ifdef HAVE_XDP_SUPPORT + SSSNIC_RQ_STATS(xdp_dropped), +#endif + SSSNIC_RQ_STATS(rx_buf_errors), +}; + +static struct sss_nic_stats g_nic_rq_stats_extern[] = { + SSSNIC_RQ_STATS(alloc_rx_dma_err), + SSSNIC_RQ_STATS(alloc_skb_err), + SSSNIC_RQ_STATS(reset_drop_sge), + SSSNIC_RQ_STATS(large_xdp_pkts), + SSSNIC_RQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_netdev_stats[] = { + SSSNIC_NETDEV_STATS(rx_packets), + SSSNIC_NETDEV_STATS(tx_packets), + SSSNIC_NETDEV_STATS(rx_bytes), + SSSNIC_NETDEV_STATS(tx_bytes), + SSSNIC_NETDEV_STATS(rx_errors), + SSSNIC_NETDEV_STATS(tx_errors), + SSSNIC_NETDEV_STATS(rx_dropped), + SSSNIC_NETDEV_STATS(tx_dropped), + SSSNIC_NETDEV_STATS(multicast), + SSSNIC_NETDEV_STATS(collisions), + SSSNIC_NETDEV_STATS(rx_length_errors), + SSSNIC_NETDEV_STATS(rx_over_errors), + SSSNIC_NETDEV_STATS(rx_crc_errors), + SSSNIC_NETDEV_STATS(rx_frame_errors), + SSSNIC_NETDEV_STATS(rx_fifo_errors), + SSSNIC_NETDEV_STATS(rx_missed_errors), + SSSNIC_NETDEV_STATS(tx_aborted_errors), + SSSNIC_NETDEV_STATS(tx_carrier_errors), + SSSNIC_NETDEV_STATS(tx_fifo_errors), + SSSNIC_NETDEV_STATS(tx_heartbeat_errors), +}; + +static struct sss_nic_stats g_dev_stats[] = { + SSSNIC_TX_STATS(tx_timeout), +}; + +static struct sss_nic_stats g_dev_stats_extern[] = { + SSSNIC_TX_STATS(tx_drop), + SSSNIC_TX_STATS(tx_invalid_qid), + SSSNIC_TX_STATS(rsvd1), + SSSNIC_TX_STATS(rsvd2), +}; + +static struct sss_nic_stats g_function_stats[] = { + SSSNIC_FUNCTION_STATS(tx_unicast_pkts), + SSSNIC_FUNCTION_STATS(tx_unicast_bytes), + SSSNIC_FUNCTION_STATS(tx_multicast_pkts), + SSSNIC_FUNCTION_STATS(tx_multicast_bytes), + SSSNIC_FUNCTION_STATS(tx_broadcast_pkts), + SSSNIC_FUNCTION_STATS(tx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(rx_unicast_pkts), + SSSNIC_FUNCTION_STATS(rx_unicast_bytes), + SSSNIC_FUNCTION_STATS(rx_multicast_pkts), + SSSNIC_FUNCTION_STATS(rx_multicast_bytes), + SSSNIC_FUNCTION_STATS(rx_broadcast_pkts), + 
SSSNIC_FUNCTION_STATS(rx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(tx_discard), + SSSNIC_FUNCTION_STATS(rx_discard), + SSSNIC_FUNCTION_STATS(tx_err), + SSSNIC_FUNCTION_STATS(rx_err), +}; + +static struct sss_nic_stats g_port_stats[] = { + SSSNIC_PORT_STATS(tx_fragment_pkts), + SSSNIC_PORT_STATS(tx_undersize_pkts), + SSSNIC_PORT_STATS(tx_undermin_pkts), + SSSNIC_PORT_STATS(tx_64_oct_pkts), + SSSNIC_PORT_STATS(tx_65_127_oct_pkts), + SSSNIC_PORT_STATS(tx_128_255_oct_pkts), + SSSNIC_PORT_STATS(tx_256_511_oct_pkts), + SSSNIC_PORT_STATS(tx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(tx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(tx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(tx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(tx_8192_9216_oct_pkts), + SSSNIC_PORT_STATS(tx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(tx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(tx_1519_max_good_pkts), + SSSNIC_PORT_STATS(tx_oversize_pkts), + SSSNIC_PORT_STATS(tx_jabber_pkts), + SSSNIC_PORT_STATS(tx_bad_pkts), + SSSNIC_PORT_STATS(tx_bad_octs), + SSSNIC_PORT_STATS(tx_good_pkts), + SSSNIC_PORT_STATS(tx_good_octs), + SSSNIC_PORT_STATS(tx_total_pkts), + SSSNIC_PORT_STATS(tx_total_octs), + SSSNIC_PORT_STATS(tx_uni_pkts), + SSSNIC_PORT_STATS(tx_multi_pkts), + SSSNIC_PORT_STATS(tx_broad_pkts), + SSSNIC_PORT_STATS(tx_pauses), + SSSNIC_PORT_STATS(tx_pfc_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(tx_control_pkts), + SSSNIC_PORT_STATS(tx_err_all_pkts), + SSSNIC_PORT_STATS(tx_from_app_good_pkts), + SSSNIC_PORT_STATS(tx_from_app_bad_pkts), + + SSSNIC_PORT_STATS(rx_fragment_pkts), + SSSNIC_PORT_STATS(rx_undersize_pkts), + SSSNIC_PORT_STATS(rx_undermin_pkts), + SSSNIC_PORT_STATS(rx_64_oct_pkts), + SSSNIC_PORT_STATS(rx_65_127_oct_pkts), + SSSNIC_PORT_STATS(rx_128_255_oct_pkts), + SSSNIC_PORT_STATS(rx_256_511_oct_pkts), + SSSNIC_PORT_STATS(rx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(rx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(rx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(rx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(rx_8192_9216_oct_pkts), + SSSNIC_PORT_STATS(rx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(rx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(rx_1519_max_good_pkts), + SSSNIC_PORT_STATS(rx_oversize_pkts), + SSSNIC_PORT_STATS(rx_jabber_pkts), + SSSNIC_PORT_STATS(rx_bad_pkts), + SSSNIC_PORT_STATS(rx_bad_octs), + SSSNIC_PORT_STATS(rx_good_pkts), + SSSNIC_PORT_STATS(rx_good_octs), + SSSNIC_PORT_STATS(rx_total_pkts), + SSSNIC_PORT_STATS(rx_total_octs), + SSSNIC_PORT_STATS(rx_uni_pkts), + SSSNIC_PORT_STATS(rx_multi_pkts), + SSSNIC_PORT_STATS(rx_broad_pkts), + SSSNIC_PORT_STATS(rx_pauses), + SSSNIC_PORT_STATS(rx_pfc_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(rx_control_pkts), + SSSNIC_PORT_STATS(rx_sym_err_pkts), + SSSNIC_PORT_STATS(rx_fcs_err_pkts), + SSSNIC_PORT_STATS(rx_send_app_good_pkts), + 
SSSNIC_PORT_STATS(rx_send_app_bad_pkts), + SSSNIC_PORT_STATS(rx_unfilter_pkts), +}; + +static const u32 g_mag_link_mode_ge[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 g_mag_link_mode_10ge_base_r[] = { + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 g_mag_link_mode_25ge_base_r[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 g_mag_link_mode_40ge_base_r4[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static const struct sss_nic_hw2ethtool_link_mode + g_link_mode_table[SSSNIC_LINK_MODE_MAX_NUMBERS] = { + [SSSNIC_LINK_MODE_GE] = { + .array = g_mag_link_mode_ge, + .array_len = ARRAY_LEN(g_mag_link_mode_ge), + .speed = SPEED_1000, + }, + [SSSNIC_LINK_MODE_10GE_BASE_R] = { + .array = g_mag_link_mode_10ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_10ge_base_r), + .speed = SPEED_10000, + }, + [SSSNIC_LINK_MODE_25GE_BASE_R] = { + .array = g_mag_link_mode_25ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_25ge_base_r), + .speed = SPEED_25000, + }, + [SSSNIC_LINK_MODE_40GE_BASE_R4] = { + .array = g_mag_link_mode_40ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_40ge_base_r4), + .speed = SPEED_40000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R] = { + .array = g_mag_link_mode_50ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R2] = { + .array = g_mag_link_mode_50ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r2), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R] = { + .array = g_mag_link_mode_100ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r), + .speed = 
SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R2] = { + .array = g_mag_link_mode_100ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r2), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R4] = { + .array = g_mag_link_mode_100ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r4), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R2] = { + .array = g_mag_link_mode_200ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r2), + .speed = SPEED_200000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R4] = { + .array = g_mag_link_mode_200ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r4), + .speed = SPEED_200000, + }, +}; + +/* Related to enum sss_nic_mag_opcode_port_speed */ +static u32 g_hw_to_ethtool_speed[] = { + (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000 +}; + +static sss_nic_port_type_handler_t g_link_port_set_handler[] = { + NULL, + sss_nic_set_fibre_port, + sss_nic_set_fibre_port, + sss_nic_set_da_port, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + sss_nic_set_fibre_port, + sss_nic_set_tp_port, + sss_nic_set_none_port +}; + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev) +{ + u32 count; + + count = ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_dev_stats_extern) + + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_sq_stats_extern) + + ARRAY_LEN(g_nic_rq_stats) + + ARRAY_LEN(g_nic_rq_stats_extern)) * nic_dev->max_qp_num; + + return count; +} + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_test_strings); +} + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev) +{ + int count; + int q_num; + + q_num = nic_dev->qp_res.qp_num; + count = ARRAY_LEN(g_netdev_stats) + ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_function_stats) + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_rq_stats)) * q_num; + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(g_port_stats); + + return count; +} + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_priv_flags_strings); +} + +static void sss_nic_get_ethtool_stats_data(char *ethtool_stats, + struct sss_nic_stats *stats, u16 stats_len, u64 *data) +{ + u16 i = 0; + u16 j = 0; + char *ptr = NULL; + + for (j = 0; j < stats_len; j++) { + ptr = ethtool_stats + stats[j].offset; + data[i] = SSSNIC_CONVERT_DATA_TYPE(stats[j].len, ptr); + i++; + } +} + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + u16 cnt = 0; +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev, &temp); +#else + const struct net_device_stats *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev); +#endif + + sss_nic_get_ethtool_stats_data((char *)net_stats, g_netdev_stats, + ARRAY_LEN(g_netdev_stats), data); + cnt += ARRAY_LEN(g_netdev_stats); + + sss_nic_get_ethtool_stats_data((char *)&nic_dev->tx_stats, g_dev_stats, + ARRAY_LEN(g_dev_stats), data + cnt); + cnt += ARRAY_LEN(g_dev_stats); + + return cnt; +} + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, u64 *data) +{ + 
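/* gather the per-queue software counters: one block of SQ stats per TX queue, then one block of RQ stats per RX queue, packed back-to-back into the ethtool data buffer */ +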
u16 qid; + struct sss_nic_rq_stats rq_stats = {0}; + struct sss_nic_sq_stats sq_stats = {0}; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->sq_desc_group) + break; + + sss_nic_get_sq_stats(&nic_dev->sq_desc_group[qid], &sq_stats); + sss_nic_get_ethtool_stats_data((char *)&sq_stats, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), + data + qid * ARRAY_LEN(g_nic_sq_stats)); + } + + data += ARRAY_LEN(g_nic_sq_stats) * nic_dev->qp_res.qp_num; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->rq_desc_group) + break; + + sss_nic_get_rq_stats(&nic_dev->rq_desc_group[qid], &rq_stats); + sss_nic_get_ethtool_stats_data((char *)&rq_stats, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), + data + qid * ARRAY_LEN(g_nic_rq_stats)); + } +} + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + struct sss_nic_port_stats vport_stats = {0}; + + ret = sss_nic_get_vport_stats(nic_dev, sss_get_global_func_id(nic_dev->hwdev), + &vport_stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get function stats from fw, ret:%d\n", ret); + return ARRAY_LEN(g_function_stats); + } + sss_nic_get_ethtool_stats_data((char *)&vport_stats, g_function_stats, + ARRAY_LEN(g_function_stats), data); + + return ARRAY_LEN(g_function_stats); +} + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + u16 i = 0; + struct sss_nic_mag_port_stats *stats = NULL; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + memset(&data[i], 0, ARRAY_LEN(g_port_stats) * sizeof(*data)); + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Malloc port stats\n"); + return ARRAY_LEN(g_port_stats); + } + + ret = sss_nic_get_phy_port_stats(nic_dev, stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port stats from fw\n"); + goto out; + } + + sss_nic_get_ethtool_stats_data((char *)stats, g_port_stats, + ARRAY_LEN(g_port_stats), data); + +out: + kfree(stats); + + return ARRAY_LEN(g_port_stats); +} + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 stats_len, char *buffer) +{ + u16 i; + + for (i = 0; i < stats_len; i++) { + memcpy(buffer, stats[i].name, ETH_GSTRING_LEN); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = + sss_nic_get_stats_strings(g_netdev_stats, ARRAY_LEN(g_netdev_stats), buffer); + cnt += sss_nic_get_stats_strings(g_dev_stats, ARRAY_LEN(g_dev_stats), + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = sss_nic_get_stats_strings(g_function_stats, + ARRAY_LEN(g_function_stats), buffer); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return cnt; + + cnt += sss_nic_get_stats_strings(g_port_stats, + ARRAY_LEN(g_port_stats), buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer) +{ + int ret; + u16 i; + + for (i = 0; i < stats_len; i++) { + ret = sprintf(buffer, stats[i].name, qid); + if (ret < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to sprintf stats name:%s, qid: %u, stats id: %u\n", + stats[i].name, qid, i); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 qid = 0; + u16 cnt = 0; + + for (qid = 0; qid < 
nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +void sss_nic_get_test_strings(struct sss_nic_dev *nic_dev, u8 *buffer) +{ + memcpy(buffer, *g_test_strings, sizeof(g_test_strings)); +} + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + u16 offset = 0; + + offset = sss_nic_get_drv_dev_strings(nic_dev, buffer); + offset += sss_nic_get_hw_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); + sss_nic_get_qp_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); +} + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + memcpy(buffer, g_priv_flags_strings, sizeof(g_priv_flags_strings)); +} + +int sss_nic_get_speed_level(u32 speed) +{ + int level; + + for (level = 0; level < ARRAY_LEN(g_hw_to_ethtool_speed); level++) { + if (g_hw_to_ethtool_speed[level] == speed) + break; + } + + return level; +} + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, + u32 hw_mode, u32 op) +{ + u32 i; + + for (i = 0; i < SSSNIC_LINK_MODE_MAX_NUMBERS; i++) { + if (test_bit(i, (unsigned long *)&hw_mode)) + SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(cmd, i, op); + } +} + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info) +{ + int ret; + u8 state = 0; + + if (port_info->supported_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->supported_mode, + SSSNIC_SET_SUPPORTED_MODE); + if (port_info->advertised_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->advertised_mode, + SSSNIC_SET_ADVERTISED_MODE); + + ret = sss_nic_get_hw_link_state(nic_dev, &state); + if (ret != 0 || state == 0) { + cmd->duplex = DUPLEX_UNKNOWN; + cmd->speed = (u32)SPEED_UNKNOWN; + return; + } + + cmd->duplex = port_info->duplex; + cmd->speed = port_info->speed < ARRAY_LEN(g_hw_to_ethtool_speed) ? 
+ g_hw_to_ethtool_speed[port_info->speed] : (u32)SPEED_UNKNOWN; +} + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_FIBRE; +} + +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_DA; +} + +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, TP); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, TP); + cmd->port = PORT_TP; +} + +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Backplane); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Backplane); + cmd->port = PORT_NONE; +} + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type) +{ + if (port_type >= ARRAY_LEN(g_link_port_set_handler)) { + cmd->port = PORT_OTHER; + return; + } + + if (!g_link_port_set_handler[port_type]) { + cmd->port = PORT_OTHER; + return; + } + + g_link_port_set_handler[port_type](cmd); +} + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get pauseparam from hw\n"); + return ret; + } + + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Pause); + if (pause_config.rx_pause != 0 && pause_config.tx_pause != 0) { + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + return 0; + } + + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Asym_Pause); + if (pause_config.rx_pause != 0) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + + return 0; +} + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(net_dev); + struct sss_nic_port_info info = {0}; + + ret = sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, net_dev, "Fail to get port info\n"); + return ret; + } + + sss_nic_set_link_speed(nic_dev, cmd, &info); + sss_nic_link_port_type(cmd, info.port_type); + + cmd->autoneg = info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + if (info.autoneg_cap != 0) + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Autoneg); + if (info.autoneg_state == SSSNIC_PORT_CFG_AN_ON) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Autoneg); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + ret = sss_nic_get_link_pause_setting(nic_dev, cmd); + + return ret; +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd) +{ + struct ethtool_link_settings *setting = &ksetting->base; + + bitmap_copy(ksetting->link_modes.advertising, cmd->advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(ksetting->link_modes.supported, cmd->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + setting->speed = cmd->speed; + setting->duplex = cmd->duplex; + setting->port = cmd->port; + setting->autoneg = cmd->autoneg; +} +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed) +{ + u32 link_mode; + + for (link_mode = 0; link_mode < SSSNIC_LINK_MODE_MAX_NUMBERS; link_mode++) { + if ((support_mode & BIT(link_mode)) == 0) + continue; + + if (g_link_mode_table[link_mode].speed == speed) + return true; + } + + return false; +} + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings) +{ + struct sss_nic_port_info info = {0}; + int ret; + int level; + + ret = sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port info\n"); + return -EAGAIN; + } + + if (info.autoneg_cap != 0) + *settings |= SSSNIC_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (info.autoneg_cap == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport autoneg\n"); + return -EOPNOTSUPP; + } + + return 0; + } + + if (speed != (u32)SPEED_UNKNOWN) { + if (info.supported_mode == SSSNIC_LINK_MODE_UNKNOWN || + info.advertised_mode == SSSNIC_LINK_MODE_UNKNOWN) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport link mode\n"); + return -EAGAIN; + } + + /* Set speed only when autoneg is disable */ + level = sss_nic_get_speed_level(speed); + if (level >= SSSNIC_PORT_SPEED_UNKNOWN || + !sss_nic_is_support_speed(info.supported_mode, speed)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport speed: %u\n", speed); + return -EINVAL; + } + + *settings |= SSSNIC_LINK_SET_SPEED; + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "Set speed when autoneg is off\n"); + return -EOPNOTSUPP; +} + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings) +{ + int ret; + int level = 0; + char cmd_str[128] = {0}; + struct sss_nic_link_ksettings cmd = {0}; + struct net_device *netdev = nic_dev->netdev; + char *str = (bool)((settings & SSSNIC_LINK_SET_AUTONEG) != 0) ? 
+ SSSNIC_AUTONEG_STRING((bool)autoneg) : ""; + + ret = snprintf(cmd_str, sizeof(cmd_str) - 1, "%s", str); + if (ret < 0) + return -EINVAL; + + if ((settings & SSSNIC_LINK_SET_SPEED) != 0) { + level = sss_nic_get_speed_level(speed); + ret = sprintf(cmd_str + strlen(cmd_str), "speed %u ", speed); + if (ret < 0) + return -EINVAL; + } + + cmd.valid_bitmap = settings; + cmd.autoneg = SSSNIC_AUTONEG_ENABLE((bool)autoneg); + cmd.speed = (u8)level; + + ret = sss_nic_set_link_settings(nic_dev, &cmd); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set %s\n", cmd_str); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set %s, ret: %d\n", cmd_str, ret); + return 0; +} + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 settings = 0; + int ret; + + ret = sss_nic_get_link_settings_param(nic_dev, autoneg, speed, &settings); + if (ret != 0) + return ret; + + if (settings != 0) + return sss_nic_set_settings_to_hw(nic_dev, autoneg, speed, settings); + + nicif_info(nic_dev, drv, netdev, "Nothing change, exit.\n"); + + return 0; +} + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats) +{ + struct sss_tool_show_item *items = stats; + int item_idx = 0; + u16 qid; + + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats, &nic_dev->tx_stats); + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats_extern, + &nic_dev->tx_stats); + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats, + &nic_dev->sq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats_extern, + &nic_dev->sq_desc_group[qid].stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats, + &nic_dev->rq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats_extern, + &nic_dev->rq_desc_group[qid].stats, qid); + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h new file mode 100644 index 00000000000000..cf2b1cbe894a1a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_API_H +#define SSS_NIC_ETHTOOL_STATS_API_H + +#include +#include + +#include "sss_kernel.h" + +struct sss_nic_stats { + char name[ETH_GSTRING_LEN]; + u32 len; + int offset; +}; + +struct sss_nic_cmd_link_settings { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define sss_nic_ethtool_ksetting_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev); + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 stats_len, char *buffer); + +u16 
sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer); + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer); + +void sss_nic_get_test_strings(struct sss_nic_dev *nic_dev, u8 *buffer); + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +int sss_nic_get_speed_level(u32 speed); + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, u32 hw_mode, u32 op); + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info); + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type); + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd); + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd); +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed); + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings); + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings); + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed); + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c new file mode 100644 index 00000000000000..6b8418bdfd18ed --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_VF_UNREGISTER 0 + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int 
sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static const struct nic_event_handler g_event_proc[] = { + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .event_handler = sss_nic_dcb_state_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE, + .event_handler = sss_nic_tx_pause_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + .event_handler = sss_nic_bond_active_event_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler g_vf_cmd_proc[] = { + { + .opcode = SSSNIC_MBX_OPCODE_VF_REGISTER, + .msg_handler = sss_nic_register_vf_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .msg_handler = sss_nic_get_vf_cos_msg_handler + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_MAC, + .msg_handler = sss_nic_get_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_SET_MAC, + .msg_handler = sss_nic_set_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_DEL_MAC, + .msg_handler = sss_nic_del_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_UPDATE_MAC, + .msg_handler = sss_nic_update_vf_mac_msg_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_event_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_event_proc); + + for (i = 0; i < cmd_num; i++) + if (g_event_proc[i].opcode == opcode) + return &g_event_proc[i]; + + return NULL; +} + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_vf_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_vf_cmd_proc[i].opcode == opcode) + return &g_vf_cmd_proc[i]; + + return NULL; +} + +static int sss_nic_init_vf_config(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + u16 func_id; + int ret; + + vf_info->specified_mac = false; + ether_addr_copy(vf_info->drv_mac, vf_info->user_mac); + + if (!is_zero_ether_addr(vf_info->drv_mac)) { + vf_info->specified_mac = true; + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_set_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set VF %d MAC, ret: %d\n", id, ret); + return ret; + } + } + + if (SSSNIC_GET_VLAN_PRIO(vf_info->pf_vlan, vf_info->pf_qos) != 0) { + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, + vf_info->pf_vlan, vf_info->pf_qos, vf_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to add VF %d VLAN_QOS, ret: %d\n", id, ret); + return ret; + } + } + + if (vf_info->max_rate != 0) { + ret = sss_nic_set_vf_tx_rate_limit(nic_io, vf_id, + vf_info->min_rate, vf_info->max_rate); + if (ret != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d max rate %u, min rate %u, ret: %d\n", + id, vf_info->max_rate, vf_info->min_rate, ret); + return ret; + } + } + + return 0; +} + +static int sss_nic_attach_vf(struct sss_nic_io *nic_io, u16 vf_id, u32 extra_feature) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + int ret; + + vf_info->extra_feature = extra_feature; + + if (vf_id > 
nic_io->max_vf_num) { + nic_err(nic_io->dev_hdl, "Fail to register VF id %d out of range: [0-%d]\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), SSSNIC_HW_VF_ID_TO_OS(nic_io->max_vf_num)); + return -EFAULT; + } + + ret = sss_nic_init_vf_config(nic_io, vf_id); + if (ret != 0) + return ret; + + vf_info->attach = true; + + return 0; +} + +int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id) +{ + struct sss_nic_mbx_mac_addr cmd_set_mac = {0}; + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + u16 out_len; + int ret; + + vf_info->extra_feature = 0; + + if (vf_id > nic_io->max_vf_num) { + nic_err(nic_io->dev_hdl, "Invalid vf_id %d, max_vf_num: %d\n", + vf_id, nic_io->max_vf_num); + return -EFAULT; + } + + vf_info->attach = false; + + if (!vf_info->specified_mac && vf_info->pf_vlan == 0) { + memset(vf_info->drv_mac, 0, ETH_ALEN); + return 0; + } + + out_len = sizeof(cmd_set_mac); + ether_addr_copy(cmd_set_mac.mac, vf_info->drv_mac); + cmd_set_mac.vlan_id = vf_info->pf_vlan; + cmd_set_mac.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC, + &cmd_set_mac, sizeof(cmd_set_mac), + &cmd_set_mac, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_mac)) { + nic_err(nic_io->dev_hdl, + "Fail to delete the mac of VF %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), ret, + cmd_set_mac.head.state, out_len); + return -EFAULT; + } + + memset(vf_info->drv_mac, 0, ETH_ALEN); + + return 0; +} + +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + int ret; + struct sss_nic_mbx_attach_vf *in_info = in_buf; + struct sss_nic_mbx_attach_vf *out_info = out_buf; + + if (in_info->op_register == SSSNIC_VF_UNREGISTER) + ret = sss_nic_dettach_vf(nic_io, vf_id); + else + ret = sss_nic_attach_vf(nic_io, vf_id, in_info->extra_feature); + + *out_size = sizeof(*out_info); + if (ret != 0) + out_info->head.state = EFAULT; + + return 0; +} + +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, + u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *out_state = out_buf; + + *out_size = sizeof(*out_state); + out_state->head.state = SSS_MGMT_CMD_SUCCESS; + memcpy(&out_state->dcb_info, &nic_io->dcb_info, sizeof(nic_io->dcb_info)); + + return 0; +} + +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + struct sss_nic_mbx_mac_addr *out_info = out_buf; + int ret; + + if (SSSNIC_SUPPORT_VF_MAC(nic_io)) { + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret == 0) { + if (is_zero_ether_addr(out_info->mac)) + ether_addr_copy(out_info->mac, vf_info->drv_mac); + } + return ret; + } + + *out_size = sizeof(*out_info); + ether_addr_copy(out_info->mac, vf_info->drv_mac); + out_info->head.state = SSS_MGMT_CMD_SUCCESS; + + return 0; +} + +static int sss_nic_cmd_vf_mac(struct sss_nic_io *nic_io, struct sss_nic_vf_info *vf_info, + u16 cmd, void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + if (!vf_info->trust && vf_info->specified_mac && 
is_valid_ether_addr(in_mac->mac)) { + out_mac->head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + if (is_valid_ether_addr(in_mac->mac)) + in_mac->vlan_id = vf_info->pf_vlan; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, cmd, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to send vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->head.state, *out_size); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_SET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if (is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->mac); + + return 0; +} + +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_DEL_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if (is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + eth_zero_addr(vf_info->drv_mac); + + return 0; +} + +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_update *in_mac = in_buf; + struct sss_nic_mbx_mac_update *out_mac = out_buf; + int ret; + + if (!is_valid_ether_addr(in_mac->old_mac.mac)) { + nic_err(nic_io->dev_hdl, "Fail to update mac, Invalid mac.\n"); + return -EINVAL; + } + + if (!vf_info->trust && vf_info->specified_mac) { + out_mac->old_mac.head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + + in_mac->old_mac.vlan_id = vf_info->pf_vlan; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_UPDATE_MAC, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to update vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->old_mac.head.state, *out_size); + return -EFAULT; + } + + if (out_mac->old_mac.head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->new_mac); + + return 0; +} + +static int _sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, + u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, out_buf, out_size, 0, channel); + + return 
sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, out_buf, out_size, 0, channel); +} + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, SSS_CHANNEL_NIC); +} + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, channel); +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mbx_handler(void *hwdev, u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_io *nic_io = NULL; + const struct sss_nic_vf_msg_handler *handler = NULL; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, in_buf, in_size, out_buf, out_size); + + nic_warn(nic_io->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info) +{ + struct sss_event_info event_info = {0}; + + event_info.type = SSSNIC_EVENT_DCB_STATE_CHANGE; + event_info.service = SSS_EVENT_SRV_NIC; + memcpy((void *)event_info.event_data, dcb_info, sizeof(*dcb_info)); + + sss_do_event_callback(hwdev, &event_info); +} + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *dcb_cfg = in_buf; + + if (!dcb_cfg) + return; + + memcpy(&nic_io->dcb_info, &dcb_cfg->dcb_info, sizeof(dcb_cfg->dcb_info)); + sss_nic_notify_dcb_state_event(nic_io->hwdev, &dcb_cfg->dcb_info); +} + +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_msg_tx_pause_info *in_pause = in_buf; + + if (in_size != sizeof(*in_pause)) { + nic_err(nic_io->dev_hdl, "Invalid in buffer size value: %u,It should be %ld\n", + in_size, sizeof(*in_pause)); + return; + } + + nic_warn(nic_io->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", + in_pause->tx_pause_except, in_pause->except_level); + sss_fault_event_report(nic_io->hwdev, SSS_FAULT_SRC_TX_PAUSE_EXCP, + (u16)in_pause->except_level); +} + +static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_event_info in_info = {0}; + struct sss_nic_msg_bond_active_info *bond_info = in_buf; + + if (in_size != sizeof(*bond_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*bond_info)); + return; + } + + memcpy((void *)in_info.event_data, bond_info, sizeof(*bond_info)); + in_info.type = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE; + in_info.service = SSS_EVENT_SRV_NIC; + sss_do_event_callback(nic_io->hwdev, &in_info); +} + +static int _sss_nic_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_io *nic_io = NULL; + const struct nic_event_handler *handler = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + *out_size = 0; + + handler = 
sss_nic_get_event_proc(cmd); + if (handler) { + handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size); + return 0; + } + + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + *out_size = sizeof(struct sss_mgmt_msg_head); + nic_warn(nic_io->dev_hdl, "Unsupport nic event, cmd: %u\n", cmd); + + return 0; +} + +int sss_nic_vf_event_handler(void *hwdev, + u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +void sss_nic_pf_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + _sss_nic_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h new file mode 100644 index 00000000000000..7c1e37929dc9fa --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_EVENT_H +#define SSS_NIC_EVENT_H + +#include +#include + +#include "sss_hw_common.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_cfg_mag_define.h" + +enum sss_nic_event_type { + SSSNIC_EVENT_LINK_DOWN, + SSSNIC_EVENT_LINK_UP, + SSSNIC_EVENT_PORT_MODULE_EVENT, + SSSNIC_EVENT_DCB_STATE_CHANGE, + SSSNIC_EVENT_MAX +}; + +struct sss_nic_vf_msg_handler { + u16 opcode; + int (*msg_handler)(struct sss_nic_io *nic_io, + u16 vf, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); +}; + +struct nic_event_handler { + u16 opcode; + void (*event_handler)(struct sss_nic_io *nic_io, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id); + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel); + +int sss_nic_pf_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info); + +int sss_nic_vf_event_handler(void *hwdev, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_pf_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c new file mode 100644 index 00000000000000..159d51e07c1954 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_cfg.h" +#include "sss_nic_filter.h" + +enum sss_nic_rx_mode_state { + SSSNIC_PROMISC_ON, + SSSNIC_ALLMULTI_ON, + SSSNIC_PROMISC_FORCE_ON, + SSSNIC_ALLMULTI_FORCE_ON, +}; + +enum sss_nic_mac_filter_state { + SSSNIC_MAC_FILTER_WAIT_SYNC, + SSSNIC_MAC_FILTER_SYNCED, + SSSNIC_MAC_FILTER_WAIT_UNSYNC, + SSSNIC_MAC_FILTER_UNSYNCED, +}; + +struct sss_nic_mac_filter { + 
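/* shadow entry for one unicast/multicast MAC address; status tracks whether the address has been synced to, or is waiting to be removed from, the hardware filter */ +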
struct list_head list; + u8 address[ETH_ALEN]; + unsigned long status; +}; + +#define SSSNIC_DEFAULT_RX_MODE (SSSNIC_RX_MODE_UC | SSSNIC_RX_MODE_MC | SSSNIC_RX_MODE_BC) + +static bool mc_mac_filter = true; +module_param(mc_mac_filter, bool, 0444); +MODULE_PARM_DESC(mc_mac_filter, "Set multicast mac filter: 0 - disable, 1 - enable (default=1)"); + +static int sss_nic_sync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +static int sss_nic_unsync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + /* The addr is in use */ + if (ether_addr_equal(address, netdev->dev_addr)) + return 0; + + return sss_nic_del_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +void sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct net_device *netdev = nic_dev->netdev; + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->uc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->mc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } +} + +static struct sss_nic_mac_filter *sss_nic_find_mac(const struct list_head *filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter = NULL; + + list_for_each_entry(filter, filter_list, list) { + if (ether_addr_equal(address, filter->address)) + return filter; + } + return NULL; +} + +static struct sss_nic_mac_filter *sss_nic_add_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + goto out; + + ether_addr_copy(filter->address, address); + + INIT_LIST_HEAD(&filter->list); + list_add_tail(&filter->list, mac_filter_list); + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return filter; +} + +static void sss_nic_del_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_mac_filter *filter) +{ + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (filter->status == SSSNIC_MAC_FILTER_WAIT_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&filter->list); + kfree(filter); + return; + } + + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; +} + +static struct sss_nic_mac_filter *sss_nic_copy_mac_filter_entry(const struct sss_nic_mac_filter *ft) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return NULL; + + *filter = *ft; + INIT_LIST_HEAD(&filter->list); + + return filter; +} + +static void sss_nic_undo_del_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + + list_for_each_entry_safe(filter, tmp_filter, from, list) { + if (sss_nic_find_mac(filter_list, filter->address)) + continue; + + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; + + list_move_tail(&filter->list, filter_list); + 
} +} + +static void sss_nic_undo_add_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, from, list) { + tmp_filter = sss_nic_find_mac(filter_list, filter->address); + if (tmp_filter && tmp_filter->status == SSSNIC_MAC_FILTER_SYNCED) + tmp_filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + } +} + +static void sss_nic_cleanup_filter_list(const struct list_head *head) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, head, list) { + list_del(&filter->list); + kfree(filter); + } +} + +static int sss_nic_sync_mac_filter_to_hw(struct sss_nic_dev *nic_dev, + struct list_head *del_list, + struct list_head *add_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + int add_num = 0; + + if (!list_empty(del_list)) { + list_for_each_entry_safe(filter, ftmp_filter, del_list, list) { + ret = sss_nic_unsync_uc(netdev, filter->address); + if (ret != 0) { /* ignore errors when deleting a mac */ + nic_err(nic_dev->dev_hdl, "Fail to delete mac\n"); + } + + list_del(&filter->list); + kfree(filter); + } + } + + if (!list_empty(add_list)) { + list_for_each_entry_safe(filter, ftmp_filter, add_list, list) { + ret = sss_nic_sync_uc(netdev, filter->address); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add mac\n"); + return ret; + } + + add_num++; + list_del(&filter->list); + kfree(filter); + } + } + + return add_num; +} + +static int sss_nic_sync_mac_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct net_device *netdev = nic_dev->netdev; + struct list_head del_tmp_list; + struct list_head add_tmp_list; + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *fclone_filter = NULL; + int ret = 0; + int add_num = 0; + + INIT_LIST_HEAD(&del_tmp_list); + INIT_LIST_HEAD(&add_tmp_list); + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_UNSYNC) + continue; + + filter->status = SSSNIC_MAC_FILTER_UNSYNCED; + list_move_tail(&filter->list, &del_tmp_list); + } + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_SYNC) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) { + ret = -ENOMEM; + break; + } + + filter->status = SSSNIC_MAC_FILTER_SYNCED; + list_add_tail(&fclone_filter->list, &add_tmp_list); + } + + if (ret != 0) { + sss_nic_undo_del_filter_entry(mac_filter_list, &del_tmp_list); + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + nicif_err(nic_dev, drv, netdev, "Fail to clone mac_filter_entry\n"); + + sss_nic_cleanup_filter_list(&del_tmp_list); + sss_nic_cleanup_filter_list(&add_tmp_list); + return -ENOMEM; + } + + add_num = sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + if (list_empty(&add_tmp_list)) + return add_num; + + /* errors occurred while adding mac to hw, delete all mac from hw */ + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + /* VF doesn't support entering promisc mode, + * so we can't delete any other uc mac + */ + if 
(!SSSNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_SYNCED) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) + break; + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + list_add_tail(&fclone_filter->list, &del_tmp_list); + } + } + + sss_nic_cleanup_filter_list(&add_tmp_list); + sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + + /* need to enter promisc/allmulti mode */ + return -ENOMEM; +} + +static void sss_nic_sync_all_mac_filter(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int add_num; + + if (test_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { + clear_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->uc_filter_list, true); + if (add_num < 0 && SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io)) { + set_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, " Force promisc mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + } + + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->mc_filter_list, false); + if (add_num < 0 && SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io)) { + set_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, "Force allmulti mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + } + } +} + +static void sss_nic_update_mac_filter(struct sss_nic_dev *nic_dev, + const struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct netdev_hw_addr *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(hw_addr, src_list) { + filter = sss_nic_find_mac(filter_list, hw_addr->addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(hw_addr, src_list) + if (ether_addr_equal(hw_addr->addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} + +#ifndef NETDEV_HW_ADDR_T_MULTICAST +static void sss_nic_update_mc_filter(struct sss_nic_dev *nic_dev, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct dev_mc_list *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) { + filter = sss_nic_find_mac(filter_list, hw_addr->da_addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->da_addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + /* delete addr if not in netdev list */ + 
list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) + if (ether_addr_equal(hw_addr->da_addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} +#endif + +static void sss_nic_update_all_mac_filter(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(SSSNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { + sss_nic_update_mac_filter(nic_dev, &netdev->uc, + &nic_dev->uc_filter_list); + if (mc_mac_filter) { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + sss_nic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list); +#else + sss_nic_update_mc_filter(nic_dev, &nic_dev->mc_filter_list); +#endif + } + } +} + +static void sss_nic_sync_rx_mode_to_hw(struct sss_nic_dev *nic_dev, int allmulti_enter, + int promisc_enter) +{ + int ret; + u32 rx_mode = SSSNIC_DEFAULT_RX_MODE; + struct net_device *netdev = nic_dev->netdev; + + rx_mode |= (allmulti_enter ? SSSNIC_RX_MODE_MC_ALL : 0); + rx_mode |= (promisc_enter ? SSSNIC_RX_MODE_PROMISC : 0); + + if (allmulti_enter != + test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode)) + nicif_info(nic_dev, drv, netdev, + "%s allmulti mode\n", + allmulti_enter ? "Enable" : "Disable"); + + if (promisc_enter != test_bit(SSSNIC_PROMISC_ON, + &nic_dev->rx_mode)) + nicif_info(nic_dev, drv, netdev, + "%s promisc mode\n", + promisc_enter ? "Enable" : "Disable"); + + ret = sss_nic_set_rx_mode(nic_dev, rx_mode); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set rx mode\n"); + return; + } + + if (allmulti_enter != 0) + set_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode); + else + clear_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode); + + if (promisc_enter != 0) + set_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode); + else + clear_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode); +} + +void sss_nic_set_rx_mode_work(struct work_struct *work) +{ + struct sss_nic_dev *nic_dev = + container_of(work, struct sss_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int allmulti_enter = 0; + int promisc_enter = 0; + + sss_nic_update_all_mac_filter(nic_dev); + + sss_nic_sync_all_mac_filter(nic_dev); + + if (SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io)) + allmulti_enter = !!(netdev->flags & IFF_ALLMULTI) || + test_bit(SSSNIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mode); + + if (SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io)) + promisc_enter = !!(netdev->flags & IFF_PROMISC) || + test_bit(SSSNIC_PROMISC_FORCE_ON, + &nic_dev->rx_mode); + + if (allmulti_enter != + test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode) || + promisc_enter != + test_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode)) + sss_nic_sync_rx_mode_to_hw(nic_dev, allmulti_enter, promisc_enter); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h new file mode 100644 index 00000000000000..65d13b459fc91c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_FILTER_H +#define SSS_NIC_FILTER_H + +#include +#include "sss_nic_dev_define.h" + +void sss_nic_set_rx_mode_work(struct work_struct *work); +void sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c 
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c new file mode 100644 index 00000000000000..fc49b645d96fea --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c @@ -0,0 +1,953 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEAULT_DROP_THD_OFF 0 +#define SSSNIC_DEAULT_DROP_THD_ON (0xFFFF) +#define SSSNIC_DEAULT_TX_CI_PENDING_LIMIT 1 +#define SSSNIC_DEAULT_TX_CI_COALESCING_TIME 1 +#define SSSNIC_WQ_PREFETCH_MIN 1 +#define SSSNIC_WQ_PREFETCH_MAX 4 +#define SSSNIC_WQ_PREFETCH_THRESHOLD 256 +#define SSSNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_CI_Q_ADDR_SIZE (64) + +#define SSSNIC_CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * SSSNIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define SSSNIC_CI_PADDR(base_paddr, qid) ((base_paddr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_CI_VADDR(base_addr, qid) ((u8 *)(base_addr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_SQ_CTX_SIZE(num_sqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_sqs) * sizeof(struct sss_nic_sq_ctx))) + +#define SSSNIC_RQ_CTX_SIZE(num_rqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_rqs) * sizeof(struct sss_nic_rq_ctx))) + +#define SSSNIC_CI_ID_HIGH_SHIFH 12 +#define SSSNIC_CI_HIGN_ID(val) ((val) >> SSSNIC_CI_ID_HIGH_SHIFH) + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_SHIFT 0 +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_SHIFT 1 + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_MASK 0x1U +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_MODE(val, member) \ + (((val) & SSSNIC_SQ_CTX_MODE_##member##_MASK) \ + << SSSNIC_SQ_CTX_MODE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_SQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_SQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_SHIFT 23 + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_PAGE_##member##_MASK) \ + << SSSNIC_SQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_SHIFT 0 + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_MASK 0x1FFFU + +#define SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_SHIFT 0 +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_SHIFT 16 + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_PKT_DROP_THD(val, member) \ + (((val) & SSSNIC_SQ_CTX_PKT_DROP_##member##_MASK) \ + << SSSNIC_SQ_CTX_PKT_DROP_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define 
SSSNIC_SQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_SQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_SQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_SQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SSSNIC_SQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_SQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_SQ_CTX_PREF_##member##_MASK) \ + << SSSNIC_SQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_SHIFT 28 +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_SHIFT 31 + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_MASK 0x3U +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_PAGE_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_SHIFT 16 +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_SHIFT 19 +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_SHIFT 23 + +#define SSSNIC_SQ_CTX_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_MASK 0x7U +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_MASK 0x3U +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_VLAN_CEQ(val, member) \ + (((val) & SSSNIC_SQ_CTX_VLAN_##member##_MASK) \ + << SSSNIC_SQ_CTX_VLAN_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_RQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_RQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_RQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_RQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) \ + << SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_SHIFT 21 +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_SHIFT 31 + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_MASK 0x3FFU +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_CEQ_ATTR(val, member) \ + (((val) & SSSNIC_RQ_CTX_CEQ_ATTR_##member##_MASK) \ + << SSSNIC_RQ_CTX_CEQ_ATTR_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_SQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_BLOCK_##member##_MASK) \ + << SSSNIC_SQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_RQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_RQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_RQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_RQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_RQ_CTX_PREF_##member##_MASK) << \ + SSSNIC_RQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CQE_LEN_SHIFT 28 + +#define SSSNIC_RQ_CTX_CQE_LEN_MASK 0x3U + +#define SSSNIC_SET_RQ_CTX_CQE_LEN(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) << \ + SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define 
SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_RQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_BLOCK_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> 12) +#define SSSNIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> 9) + +enum sss_nic_qp_ctx_type { + SSSNIC_QP_CTX_TYPE_SQ, + SSSNIC_QP_CTX_TYPE_RQ, +}; + +struct sss_nic_qp_ctx_header { + u16 q_num; + u16 q_type; + u16 start_qid; + u16 rsvd; +}; + +struct sss_nic_clear_q_ctx { + struct sss_nic_qp_ctx_header ctrlq_hdr; + u32 rsvd; +}; + +struct sss_nic_rq_ctx { + u32 ci_pi; + u32 ceq_attr; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 hi_wq_block_pfn; + u32 lo_wq_block_pfn; +}; + +struct sss_nic_sq_ctx { + u32 ci_pi; + u32 drop_mode_sp; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 rsvd8; + u32 rsvd9; + u32 hi_wq_block_pfn; + u32 lo_wq_block_pfn; +}; + +struct sss_nic_rq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_rq_ctx rq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +struct sss_nic_sq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_sq_ctx sq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +static int sss_nic_create_sq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *sq, + u16 qid, u32 sq_depth, u16 msix_id) +{ + int ret = 0; + + sq->qid = qid; + sq->msix_id = msix_id; + sq->owner = 1; + + ret = sss_create_wq(nic_io->hwdev, &sq->wq, sq_depth, + (u16)BIT(SSSNIC_SQ_WQEBB_SHIFT)); + if (ret != 0) + nic_err(nic_io->dev_hdl, "Fail to create sq(%u) wq\n", qid); + + return ret; +} + +static void sss_nic_destroy_sq(struct sss_nic_io_queue *sq) +{ + sss_destroy_wq(&sq->wq); +} + +static int sss_nic_create_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, + u16 qid, u32 rq_depth, u16 msix_id) +{ + int ret = 0; + + rq->qid = qid; + rq->msix_id = msix_id; + rq->wqe_type = SSSNIC_NORMAL_RQ_WQE; + + rq->rx.pi_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE, + &rq->rx.pi_daddr, GFP_KERNEL); + if (!rq->rx.pi_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to allocate rq pi virt addr\n"); + return -ENOMEM; + } + + ret = sss_create_wq(nic_io->hwdev, &rq->wq, rq_depth, + (u16)BIT(SSSNIC_RQ_WQEBB_SHIFT + SSSNIC_NORMAL_RQ_WQE)); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq(%u) wq\n", qid); + dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + return ret; + } + + return 0; +} + +static void sss_nic_destroy_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq) +{ + dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + + sss_destroy_wq(&rq->wq); +} + +static int sss_nic_create_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq, + u32 rq_depth, u32 sq_depth, u16 qid, u16 qp_msix_id) +{ + int ret = 0; + + ret = sss_nic_create_rq(nic_io, rq, qid, rq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq, qid: %u\n", qid); + return ret; + } + + ret = sss_nic_create_sq(nic_io, sq, qid, sq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create sq, qid: %u\n", qid); + sss_nic_destroy_rq(nic_io, rq); + } + + return ret; +} + +static void 
sss_nic_destroy_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq) +{ + sss_nic_destroy_rq(nic_io, rq); + sss_nic_destroy_sq(sq); +} + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io) +{ + void __iomem *db_base = NULL; + int ret = 0; + + nic_io->max_qp_num = sss_get_max_sq_num(nic_io->hwdev); + + nic_io->ci_base_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, + PAGE_SIZE), + &nic_io->ci_base_daddr, GFP_KERNEL); + if (!nic_io->ci_base_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to alloc ci dma buf\n"); + return -ENOMEM; + } + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc sq doorbell\n"); + goto out; + } + nic_io->sq_db_addr = (u8 *)db_base; + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc rq doorbell\n"); + sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr); + goto out; + } + nic_io->rq_db_addr = (u8 *)db_base; + + return 0; + +out: + dma_free_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE), + nic_io->ci_base_vaddr, nic_io->ci_base_daddr); + nic_io->ci_base_vaddr = NULL; + + return -ENOMEM; +} + +void sss_nic_io_resource_deinit(struct sss_nic_io *nic_io) +{ + dma_free_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE), + nic_io->ci_base_vaddr, nic_io->ci_base_daddr); + + sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr); + sss_free_db_addr(nic_io->hwdev, nic_io->rq_db_addr); +} + +int sss_nic_alloc_qp(struct sss_nic_io *nic_io, + struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info) +{ + u16 i; + u16 qid; + int ret = 0; + struct sss_nic_io_queue *rq_group = NULL; + struct sss_nic_io_queue *sq_group = NULL; + + if (qp_info->qp_num > nic_io->max_qp_num || qp_info->qp_num == 0) + return -EINVAL; + + rq_group = kcalloc(qp_info->qp_num, sizeof(*rq_group), GFP_KERNEL); + if (!rq_group) + return -ENOMEM; + + sq_group = kcalloc(qp_info->qp_num, sizeof(*sq_group), GFP_KERNEL); + if (!sq_group) { + ret = -ENOMEM; + nic_err(nic_io->dev_hdl, "Fail to allocate sq\n"); + goto alloc_sq_err; + } + + for (qid = 0; qid < qp_info->qp_num; qid++) { + ret = sss_nic_create_qp(nic_io, &rq_group[qid], &sq_group[qid], + qp_info->rq_depth, qp_info->sq_depth, qid, + qp_msix_arry[qid].msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, + "Fail to allocate qp %u, err: %d\n", qid, ret); + goto create_qp_err; + } + } + + qp_info->rq_group = rq_group; + qp_info->sq_group = sq_group; + + return 0; + +create_qp_err: + for (i = 0; i < qid; i++) + sss_nic_destroy_qp(nic_io, &rq_group[i], &sq_group[i]); + + kfree(sq_group); + +alloc_sq_err: + kfree(rq_group); + + return ret; +} + +void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info) +{ + u16 qid; + + for (qid = 0; qid < qp_info->qp_num; qid++) + sss_nic_destroy_qp(nic_io, &qp_info->rq_group[qid], + &qp_info->sq_group[qid]); + + kfree(qp_info->rq_group); + kfree(qp_info->sq_group); + qp_info->rq_group = NULL; + qp_info->sq_group = NULL; +} + +static void sss_nic_init_db_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + u16 qid; + u16 *ci_addr = NULL; + + for (qid = 0; qid < nic_io->active_qp_num; qid++) { + qp_info->rq_group[qid].db_addr = nic_io->rq_db_addr; + qp_info->sq_group[qid].db_addr = nic_io->sq_db_addr; + qp_info->sq_group[qid].tx.ci_addr = + SSSNIC_CI_VADDR(nic_io->ci_base_vaddr, qid); + ci_addr = (u16 
*)qp_info->sq_group[qid].tx.ci_addr; + *ci_addr = 0; + } +} + +int sss_nic_init_qp_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + nic_io->rq_group = qp_info->rq_group; + nic_io->sq_group = qp_info->sq_group; + nic_io->active_qp_num = qp_info->qp_num; + + sss_nic_init_db_info(nic_io, qp_info); + + return sss_nic_init_qp_ctx(nic_io); +} + +void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + qp_info->qp_num = nic_io->active_qp_num; + qp_info->rq_group = nic_io->rq_group; + qp_info->sq_group = nic_io->sq_group; + + sss_nic_deinit_qp_ctx(nic_io->hwdev); +} + +static void sss_nic_fill_qp_ctx_ctrlq_header(struct sss_nic_qp_ctx_header *qp_ctx_hdr, + enum sss_nic_qp_ctx_type ctx_type, + u16 queue_num, u16 qid) +{ + qp_ctx_hdr->rsvd = 0; + qp_ctx_hdr->start_qid = qid; + qp_ctx_hdr->q_num = queue_num; + qp_ctx_hdr->q_type = ctx_type; + sss_cpu_to_be32(qp_ctx_hdr, sizeof(*qp_ctx_hdr)); +} + +static void sss_nic_fill_sq_ctx_ctrlq_body(struct sss_nic_io_queue *sq, u16 qid, + struct sss_nic_sq_ctx *sq_ctx) +{ + u16 ci_start; + u16 pi_start; + u32 lo_wq_block_pfn; + u32 hi_wq_block_pfn; + u32 lo_wq_page_pfn; + u32 hi_wq_page_pfn; + u64 wq_block_pfn; + u64 wq_page_addr; + u64 wq_page_pfn; + + pi_start = sss_nic_get_sq_local_pi(sq); + ci_start = sss_nic_get_sq_local_ci(sq); + + wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(sq->wq.block_paddr); + lo_wq_block_pfn = lower_32_bits(wq_block_pfn); + hi_wq_block_pfn = upper_32_bits(wq_block_pfn); + + wq_page_addr = sss_wq_get_first_wqe_page_addr(&sq->wq); + wq_page_pfn = SSSNIC_WQ_PAGE_PFN(wq_page_addr); + lo_wq_page_pfn = lower_32_bits(wq_page_pfn); + hi_wq_page_pfn = upper_32_bits(wq_page_pfn); + + sq_ctx->rsvd0 = 0; + + sq_ctx->drop_mode_sp = + SSSNIC_SET_SQ_CTX_MODE(0, SP_FLAG) | + SSSNIC_SET_SQ_CTX_MODE(0, PKT_DROP); + + sq_ctx->ci_pi = + SSSNIC_SET_SQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_SQ_CTX_CI_PI(pi_start, PI_ID); + + sq_ctx->global_sq_id = + SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(qid, GLOBAL_SQ_ID); + + sq_ctx->pkt_drop_thd = + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_ON, THD_ON) | + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_OFF, THD_OFF); + + sq_ctx->vlan_ceq_attr = + SSSNIC_SET_SQ_CTX_VLAN_CEQ(0, CEQ_EN) | + SSSNIC_SET_SQ_CTX_VLAN_CEQ(1, INSERT_MODE); + + sq_ctx->pref_ci_owner = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_SQ_CTX_PREF(1, OWNER); + + sq_ctx->pref_cache = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_SQ_CTX_PREF(ci_start, CI_LOW) | + SSSNIC_SET_SQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI); + + sq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_wq_pfn = + SSSNIC_SET_SQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_SQ_CTX_WQ_PAGE(1, OWNER); + + sq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + + sq_ctx->hi_wq_block_pfn = + SSSNIC_SET_SQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + sss_cpu_to_be32(sq_ctx, sizeof(*sq_ctx)); +} + +static void sss_nic_fill_rq_ctx_ctrlq_body(struct sss_nic_io_queue *rq, + struct sss_nic_rq_ctx *rq_ctx) +{ + u16 wqe_type = rq->wqe_type; + u16 ci_start = (u16)((u32)sss_nic_get_rq_local_ci(rq) << wqe_type); + u16 pi_start = (u16)((u32)sss_nic_get_rq_local_pi(rq) << wqe_type); + u64 wq_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); + u64 wq_page_pfn = 
SSSNIC_WQ_PAGE_PFN(wq_page_addr); + u64 wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(rq->wq.block_paddr); + u32 lo_wq_page_pfn = lower_32_bits(wq_page_pfn); + u32 hi_wq_page_pfn = upper_32_bits(wq_page_pfn); + u32 lo_wq_block_pfn = lower_32_bits(wq_block_pfn); + u32 hi_wq_block_pfn = upper_32_bits(wq_block_pfn); + + rq_ctx->ceq_attr = SSSNIC_SET_RQ_CTX_CEQ_ATTR(0, EN) | + SSSNIC_SET_RQ_CTX_CEQ_ATTR(rq->msix_id, INTR); + + rq_ctx->ci_pi = + SSSNIC_SET_RQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_RQ_CTX_CI_PI(pi_start, PI_ID); + + rq_ctx->pref_cache = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctx->pref_ci_owner = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_RQ_CTX_PREF(1, OWNER); + + rq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + rq_ctx->hi_wq_pfn = + SSSNIC_SET_RQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_RQ_CTX_WQ_PAGE(1, OWNER); + + if (wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(0, WQE_TYPE); + } else if (wqe_type == SSSNIC_NORMAL_RQ_WQE) { + rq_ctx->cqe_sge_len = SSSNIC_SET_RQ_CTX_CQE_LEN(1, CQE_LEN); + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(2, WQE_TYPE); + } else { + pr_err("Invalid rq wqe type: %u", wqe_type); + } + + rq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + rq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_RQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI) | + SSSNIC_SET_RQ_CTX_PREF(ci_start, CI_LOW); + + rq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + rq_ctx->hi_wq_block_pfn = + SSSNIC_SET_RQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + rq_ctx->pi_paddr_lo = lower_32_bits(rq->rx.pi_daddr); + rq_ctx->pi_paddr_hi = upper_32_bits(rq->rx.pi_daddr); + + sss_cpu_to_be32(rq_ctx, sizeof(*rq_ctx)); +} + +static int sss_nic_send_sq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; + u64 out_param = 0; + int ret; + struct sss_nic_sq_ctx_block *sq_ctx_block = msg_buf->buf; + + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + sss_nic_fill_qp_ctx_ctrlq_header(&sq_ctx_block->ctrlq_hdr, + SSSNIC_QP_CTX_TYPE_SQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_sq_ctx_ctrlq_body(&nic_io->sq_group[qid + i], qid + i, + &sq_ctx_block->sq_ctxt[i]); + + msg_buf->size = SSSNIC_SQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_sq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + ret = sss_nic_send_sq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_send_rq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; 
+ u64 out_param = 0; + int ret; + struct sss_nic_rq_ctx_block *rq_ctx_block = msg_buf->buf; + + rq_ctx_block = msg_buf->buf; + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + sss_nic_fill_qp_ctx_ctrlq_header(&rq_ctx_block->ctrlq_hdr, + SSSNIC_QP_CTX_TYPE_RQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_rq_ctx_ctrlq_body(&nic_io->rq_group[qid + i], + &rq_ctx_block->rq_ctxt[i]); + + msg_buf->size = SSSNIC_RQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_rq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + ret = sss_nic_send_rq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_offload_ctx(struct sss_nic_io *nic_io, + enum sss_nic_qp_ctx_type ctx_type) +{ + int ret = 0; + u64 out_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + struct sss_nic_clear_q_ctx *ctx_block = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx_block = msg_buf->buf; + ctx_block->ctrlq_hdr.start_qid = 0; + ctx_block->ctrlq_hdr.q_type = ctx_type; + ctx_block->ctrlq_hdr.q_num = nic_io->max_qp_num; + + sss_cpu_to_be32(ctx_block, sizeof(*ctx_block)); + + msg_buf->size = sizeof(*ctx_block); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to clean queue offload ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + ret = -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_qp_offload_ctx(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_nic_reset_hw_offload_ctx(nic_io, SSSNIC_QP_CTX_TYPE_SQ); + if (ret != 0) + return ret; + + ret = sss_nic_reset_hw_offload_ctx(nic_io, SSSNIC_QP_CTX_TYPE_RQ); + + return ret; +} + +static int sss_nic_set_hw_intr_attr(struct sss_nic_io *nic_io, u16 qid) +{ + struct sss_nic_mbx_intr_attr cmd_ci_attr = {0}; + u16 out_len = sizeof(cmd_ci_attr); + int ret; + + cmd_ci_attr.func_id = sss_get_global_func_id(nic_io->hwdev); + cmd_ci_attr.dma_attr_off = 0; + cmd_ci_attr.pending_limit = SSSNIC_DEAULT_TX_CI_PENDING_LIMIT; + cmd_ci_attr.coalescing_time = SSSNIC_DEAULT_TX_CI_COALESCING_TIME; + cmd_ci_attr.intr_en = 1; + cmd_ci_attr.intr_id = nic_io->sq_group[qid].msix_id; + cmd_ci_attr.l2nic_sqn = qid; + cmd_ci_attr.ci_addr = SSSNIC_CI_PADDR(nic_io->ci_base_daddr, qid) >> 0x2; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET, + &cmd_ci_attr, sizeof(cmd_ci_attr), &cmd_ci_attr, + &out_len); + if 
(SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ci_attr)) { + nic_err(nic_io->dev_hdl, + "Fail to set ci attr table, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ci_attr.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_set_qp_intr_attr(struct sss_nic_io *nic_io) +{ + u16 qid; + int ret; + + for (qid = 0; qid < nic_io->active_qp_num; qid++) { + ret = sss_nic_set_hw_intr_attr(nic_io, qid); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set ci table, qid:%u\n", qid); + return ret; + } + } + + return 0; +} + +int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io) +{ + u32 rq_depth; + int ret; + + ret = sss_nic_send_sq_ctx_to_hw(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to send sq ctx to hw\n"); + return ret; + } + + ret = sss_nic_send_rq_ctx_to_hw(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to send rq ctx to hw\n"); + return ret; + } + + ret = sss_nic_reset_hw_qp_offload_ctx(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to reset qp offload ctx\n"); + return ret; + } + + rq_depth = nic_io->rq_group[0].wq.q_depth << nic_io->rq_group[0].wqe_type; + ret = sss_chip_set_root_ctx(nic_io->hwdev, rq_depth, nic_io->sq_group[0].wq.q_depth, + nic_io->rx_buff_len, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set root context\n"); + return ret; + } + + ret = sss_nic_set_qp_intr_attr(nic_io); + if (ret != 0) { + sss_chip_clean_root_ctx(nic_io->hwdev, SSS_CHANNEL_NIC); + nic_err(nic_io->dev_hdl, "Fail to set ci table\n"); + } + + return ret; +} + +void sss_nic_deinit_qp_ctx(void *hwdev) +{ + if (!hwdev) + return; + sss_chip_clean_root_ctx(hwdev, SSS_CHANNEL_NIC); +} +EXPORT_SYMBOL_GPL(sss_nic_deinit_qp_ctx); diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h new file mode 100644 index 00000000000000..ab2be037dfd555 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IO_H +#define SSS_NIC_IO_H + +#include "sss_hw.h" +#include "sss_hw_wq.h" +#include "sss_nic_io_define.h" + +#define SSSNIC_RQ_WQEBB_SHIFT 3 +#define SSSNIC_CQE_SIZE_SHIFT 4 +#define SSSNIC_SQ_WQEBB_SHIFT 4 +#define SSSNIC_MIN_QUEUE_DEPTH 128 +#define SSSNIC_MAX_RX_QUEUE_DEPTH 16384 +#define SSSNIC_MAX_TX_QUEUE_DEPTH 65536 +#define SSSNIC_SQ_WQEBB_SIZE BIT(SSSNIC_SQ_WQEBB_SHIFT) + +/* ******************** DOORBELL DEFINE INFO ******************** */ +#define DB_INFO_CFLAG_SHIFT 23 +#define DB_INFO_QID_SHIFT 0 +#define DB_INFO_TYPE_SHIFT 27 +#define DB_INFO_NON_FILTER_SHIFT 22 +#define DB_INFO_COS_SHIFT 24 + +#define DB_INFO_COS_MASK 0x7U +#define DB_INFO_QID_MASK 0x1FFFU +#define DB_INFO_CFLAG_MASK 0x1U +#define DB_INFO_TYPE_MASK 0x1FU +#define DB_INFO_NON_FILTER_MASK 0x1U +#define SSSNIC_DB_INFO_SET(val, member) \ + (((u32)(val) & DB_INFO_##member##_MASK) << \ + DB_INFO_##member##_SHIFT) + +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HI_SHIFT 8 +#define SRC_TYPE 1 +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + DB_PI_LOW(pi)) + +#define sss_nic_get_sq_local_pi(sq) SSS_WQ_MASK_ID(&(sq)->wq, (sq)->wq.pi) +#define sss_nic_get_sq_local_ci(sq) SSS_WQ_MASK_ID(&(sq)->wq, (sq)->wq.ci) +#define sss_nic_get_sq_hw_ci(sq) \ + SSS_WQ_MASK_ID(&(sq)->wq, sss_hw_cpu16(*(u16 
*)(sq)->tx.ci_addr)) + +#define sss_nic_get_rq_local_pi(rq) SSS_WQ_MASK_ID(&(rq)->wq, (rq)->wq.pi) +#define sss_nic_get_rq_local_ci(rq) SSS_WQ_MASK_ID(&(rq)->wq, (rq)->wq.ci) + +/* CFLAG_DATA_PATH */ +#define RQ_CFLAG_DP 1 +#define SQ_CFLAG_DP 0 + +enum sss_nic_queue_type { + SSSNIC_SQ, + SSSNIC_RQ, + SSSNIC_MAX_QUEUE_TYPE +}; + +struct sss_nic_db { + u32 db_info; + u32 pi_hi; +}; + +enum sss_nic_rq_wqe_type { + SSSNIC_COMPACT_RQ_WQE, + SSSNIC_NORMAL_RQ_WQE, + SSSNIC_EXTEND_RQ_WQE, +}; + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io); +int sss_nic_init_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_alloc_qp(struct sss_nic_io *nic_io, + struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info); +void sss_nic_io_resource_deinit(struct sss_nic_io *nic_io); +void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io); +void sss_nic_deinit_qp_ctx(void *hwdev); + +/* * + * @brief sss_nic_write_db - write doorbell + * @param queue: nic io queue + * @param cos: cos index + * @param cflag: 0--sq, 1--rq + * @param pi: product index + */ +static inline void sss_nic_write_db(struct sss_nic_io_queue *queue, + int cos, u8 cflag, u16 pi) +{ + struct sss_nic_db doorbell; + + doorbell.db_info = SSSNIC_DB_INFO_SET(SRC_TYPE, TYPE) | SSSNIC_DB_INFO_SET(cflag, CFLAG) | + SSSNIC_DB_INFO_SET(cos, COS) | SSSNIC_DB_INFO_SET(queue->qid, QID); + doorbell.pi_hi = DB_PI_HIGH(pi); + doorbell.db_info = sss_hw_be32(doorbell.db_info); + doorbell.pi_hi = sss_hw_be32(doorbell.pi_hi); + + /* make sure write correctly db to reg */ + wmb(); + + writeq(*((u64 *)&doorbell), DB_ADDR(queue, pi)); +} + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c new file mode 100644 index 00000000000000..7c2a1d266b6249 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_irq.h" + +#define SSSNIC_AVG_PKT_SMALL_SIZE 256U + +static int sss_nic_napi_poll(struct napi_struct *napi, int budget) +{ + int tx_pkt; + int rx_pkt; + + struct sss_nic_irq_cfg *nic_irq = container_of(napi, struct sss_nic_irq_cfg, napi); + struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev); + + rx_pkt = sss_nic_rx_poll(nic_irq->rq, budget); + tx_pkt = sss_nic_tx_poll(nic_irq->sq, budget); + + if (tx_pkt >= budget || rx_pkt >= budget) + return budget; + + napi_complete(napi); + + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + + return max(tx_pkt, rx_pkt); +} + +static void sss_nic_add_napi(struct sss_nic_irq_cfg *nic_irq, int budget) +{ +#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT + netif_napi_add_weight(nic_irq->netdev, &nic_irq->napi, sss_nic_napi_poll, budget); +#else + netif_napi_add(nic_irq->netdev, &nic_irq->napi, sss_nic_napi_poll, budget); +#endif + napi_enable(&nic_irq->napi); +} + +static void sss_nic_del_napi(struct sss_nic_irq_cfg *nic_irq) +{ + napi_disable(&nic_irq->napi); + 
netif_napi_del(&nic_irq->napi); +} + +static irqreturn_t sss_nic_qp_irq(int irq, void *data) +{ + struct sss_nic_irq_cfg *nic_irq = (struct sss_nic_irq_cfg *)data; + struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev); + + sss_chip_clear_msix_resend_bit(nic_dev->hwdev, nic_irq->msix_id, 1); + + napi_schedule(&nic_irq->napi); + + return IRQ_HANDLED; +} + +static int sss_nic_request_irq(struct sss_nic_dev *nic_dev, u16 qid) +{ + int ret; + struct sss_irq_cfg irq_cfg = {0}; + struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + + sss_nic_add_napi(nic_irq, nic_dev->poll_budget); + + irq_cfg.coalesc_intr_set = 1; + irq_cfg.msix_id = nic_irq->msix_id; + irq_cfg.pending = nic_dev->coal_info[qid].pending_limt; + irq_cfg.coalesc_timer = + nic_dev->coal_info[qid].coalesce_timer; + irq_cfg.resend_timer = nic_dev->coal_info[qid].resend_timer; + nic_dev->rq_desc_group[qid].last_coal_timer = + nic_dev->coal_info[qid].coalesce_timer; + nic_dev->rq_desc_group[qid].last_pending_limt = + nic_dev->coal_info[qid].pending_limt; + ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to set rx msix attr.\n"); + goto out; + } + + ret = request_irq(nic_irq->irq_id, &sss_nic_qp_irq, 0, nic_irq->irq_name, nic_irq); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_irq->netdev, "Fail to request rx irq\n"); + goto out; + } + + irq_set_affinity_hint(nic_irq->irq_id, &nic_irq->affinity_mask); + + return 0; + +out: + sss_nic_del_napi(nic_irq); + return ret; +} + +static void sss_nic_release_irq(struct sss_nic_irq_cfg *nic_irq) +{ + irq_set_affinity_hint(nic_irq->irq_id, NULL); + synchronize_irq(nic_irq->irq_id); + free_irq(nic_irq->irq_id, nic_irq); + sss_nic_del_napi(nic_irq); +} + +static int sss_nic_set_hw_coal(struct sss_nic_dev *nic_dev, + u16 qid, u8 coal_timer_cfg, u8 pending_limt) +{ + int ret; + struct sss_irq_cfg cmd_irq_cfg = {0}; + + cmd_irq_cfg.coalesc_intr_set = 1; + cmd_irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id; + cmd_irq_cfg.pending = pending_limt; + cmd_irq_cfg.coalesc_timer = coal_timer_cfg; + cmd_irq_cfg.resend_timer = + nic_dev->coal_info[qid].resend_timer; + + ret = sss_chip_set_msix_attr(nic_dev->hwdev, cmd_irq_cfg, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to modify moderation for Queue: %u\n", qid); + return ret; + } + + return 0; +} + +static void sss_nic_calculate_intr_coal(struct sss_nic_intr_coal_info *coal_info, + u64 rx_rate, u8 *coal_timer_cfg, u8 *pending_limt) +{ + if (rx_rate < coal_info->pkt_rate_low) { + *pending_limt = coal_info->rx_pending_limt_low; + *coal_timer_cfg = coal_info->rx_usecs_low; + } else if (rx_rate > coal_info->pkt_rate_high) { + *pending_limt = coal_info->rx_pending_limt_high; + *coal_timer_cfg = coal_info->rx_usecs_high; + } else { + u8 rx_pending_limt = coal_info->rx_pending_limt_high - + coal_info->rx_pending_limt_low; + u8 rx_usecs = coal_info->rx_usecs_high - coal_info->rx_usecs_low; + u64 rx_rate_diff = rx_rate - coal_info->pkt_rate_low; + u64 pkt_rate = coal_info->pkt_rate_high - coal_info->pkt_rate_low; + + *pending_limt = (u8)(rx_rate_diff * rx_pending_limt / pkt_rate + + coal_info->rx_pending_limt_low); + *coal_timer_cfg = (u8)(rx_rate_diff * rx_usecs / pkt_rate + + coal_info->rx_usecs_low); + } +} + +static void sss_nic_update_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, u64 rx_rate, u64 tx_rate, u64 avg_pkt_size) +{ + u8 pending_limt; + u8 coal_timer_cfg; + struct sss_nic_intr_coal_info 
*coal_info = NULL; + + coal_info = &nic_dev->coal_info[qid]; + + if (rx_rate > SSSNIC_RX_RATE_THRESH && avg_pkt_size > SSSNIC_AVG_PKT_SMALL_SIZE) { + sss_nic_calculate_intr_coal(coal_info, rx_rate, &coal_timer_cfg, &pending_limt); + } else { + pending_limt = coal_info->rx_pending_limt_low; + coal_timer_cfg = SSSNIC_LOWEST_LATENCY; + } + + if (coal_timer_cfg == nic_dev->rq_desc_group[qid].last_coal_timer && + pending_limt == nic_dev->rq_desc_group[qid].last_pending_limt) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || qid >= nic_dev->qp_res.qp_num) + return; + + (void)sss_nic_set_hw_coal(nic_dev, qid, coal_timer_cfg, pending_limt); + + nic_dev->rq_desc_group[qid].last_pending_limt = pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = coal_timer_cfg; +} + +static void sss_nic_adjust_coal_work(struct work_struct *work) +{ + u16 qid; + u64 avg_pkt_size; + u64 tx_pkts; + u64 tx_rate; + u64 rx_bytes; + u64 rx_pkts; + u64 rx_rate; + struct delayed_work *delay = to_delayed_work(work); + struct sss_nic_dev *nic_dev = + container_of(delay, struct sss_nic_dev, moderation_task); + unsigned long period; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + period = (unsigned long)(jiffies - nic_dev->last_jiffies); + + if (nic_dev->use_adaptive_rx_coalesce == 0 || period == 0) + return; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + rx_bytes = nic_dev->rq_desc_group[qid].stats.rx_bytes - + nic_dev->rq_desc_group[qid].last_rx_bytes; + rx_pkts = nic_dev->rq_desc_group[qid].stats.rx_packets - + nic_dev->rq_desc_group[qid].last_rx_pkts; + avg_pkt_size = (rx_pkts != 0) ? (rx_bytes / rx_pkts) : 0; + rx_rate = rx_pkts * HZ / period; + + tx_pkts = nic_dev->sq_desc_group[qid].stats.tx_packets - + nic_dev->sq_desc_group[qid].last_tx_pkts; + tx_rate = tx_pkts * HZ / period; + + nic_dev->rq_desc_group[qid].last_rx_bytes = + nic_dev->rq_desc_group[qid].stats.rx_bytes; + nic_dev->rq_desc_group[qid].last_rx_pkts = + nic_dev->rq_desc_group[qid].stats.rx_packets; + nic_dev->sq_desc_group[qid].last_tx_bytes = + nic_dev->sq_desc_group[qid].stats.tx_bytes; + nic_dev->sq_desc_group[qid].last_tx_pkts = + nic_dev->sq_desc_group[qid].stats.tx_packets; + + sss_nic_update_intr_coal(nic_dev, qid, rx_rate, tx_rate, avg_pkt_size); + } + + nic_dev->last_jiffies = jiffies; +} + +static void sss_nic_dev_irq_cfg_init(struct sss_nic_dev *nic_dev, u16 qid) +{ + struct sss_irq_desc *irq_desc = &nic_dev->irq_desc_group[qid]; + struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + + nic_irq->netdev = nic_dev->netdev; + nic_irq->msix_id = irq_desc->msix_id; + nic_irq->irq_id = irq_desc->irq_id; + nic_irq->sq = &nic_dev->sq_desc_group[qid]; + nic_irq->rq = &nic_dev->rq_desc_group[qid]; + nic_dev->rq_desc_group[qid].irq_cfg = nic_irq; +} + +static void __sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev, + struct sss_nic_irq_cfg *nic_irq) +{ + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, SSS_MSIX_DISABLE); + sss_chip_set_msix_auto_mask(nic_dev->hwdev, + nic_irq->msix_id, SSS_CLR_MSIX_AUTO_MASK); + sss_nic_release_irq(nic_irq); +} + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 i; + u16 qid; + u32 cpuid; + int ret; + struct sss_nic_irq_cfg *nic_irq = NULL; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + sss_nic_dev_irq_cfg_init(nic_dev, qid); + + cpuid = cpumask_local_spread(qid, 
dev_to_node(nic_dev->dev_hdl)); + cpumask_set_cpu(cpuid, &nic_irq->affinity_mask); + + ret = snprintf(nic_irq->irq_name, sizeof(nic_irq->irq_name), + "%s_qp%u", nic_dev->netdev->name, qid); + if (ret < 0) { + ret = -EINVAL; + goto out; + } + + ret = sss_nic_request_irq(nic_dev, qid); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to request rx irq\n"); + goto out; + } + + sss_chip_set_msix_auto_mask(nic_dev->hwdev, nic_irq->msix_id, + SSS_SET_MSIX_AUTO_MASK); + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, sss_nic_adjust_coal_work); + + return 0; + +out: + for (i = 0; i < qid; i++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[i]); + + return ret; +} + +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 qid; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[qid]); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h new file mode 100644 index 00000000000000..9731e347129359 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_H +#define SSS_NIC_IRQ_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_dev_define.h" + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev); +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c new file mode 100644 index 00000000000000..c11ec5a24515ba --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +struct sss_nic_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +#define SSSNIC_LOOP_MODE_MIN 1 +#define SSSNIC_LOOP_MODE_MAX 6 + +#define SSSNIC_LOOP_MODE_IS_INVALID(mode) \ + (unlikely(((mode) > SSSNIC_LOOP_MODE_MAX) || ((mode) < SSSNIC_LOOP_MODE_MIN))) + +#define SSSNIC_LINK_INFO_VALID 1 + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); +static int sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size, u16 channel); + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel) +{ + struct sss_nic_mbx_set_port_mag_state port_state = {0}; + u16 out_len = sizeof(port_state); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + port_state.state = enable ? 
(SSSNIC_MAG_OPCODE_TX_ENABLE | SSSNIC_MAG_OPCODE_RX_ENABLE) : + SSSNIC_MAG_OPCODE_PORT_DISABLE; + port_state.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_ENABLE, + &port_state, sizeof(port_state), + &port_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set port state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, port_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats) +{ + struct sss_nic_mbx_mag_port_stats_info stats_info = {0}; + struct sss_nic_mbx_mag_port_stats *port_stats = NULL; + u16 out_len = sizeof(*port_stats); + int ret; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + stats_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_STAT, + &stats_info, sizeof(stats_info), + port_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, port_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_stats->head.state, out_len); + ret = -EIO; + goto out; + } + + memcpy(stats, &port_stats->counter, sizeof(*stats)); + +out: + kfree(port_stats); + + return ret; +} + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_link_ksettings settings = {0}; + u32 valid_bitmap = 0; + + valid_bitmap |= SSSNIC_LINK_SET_AUTONEG; + settings.valid_bitmap = valid_bitmap; + settings.autoneg = enable ? SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF; + + return sss_nic_set_link_settings(nic_dev, &settings); +} + +static int sss_nic_cfg_loopback_mode(struct sss_nic_dev *nic_dev, u8 opcode, + u8 *mode, u8 *enable) +{ + struct sss_nic_mbx_loopback_mode loopback_mode = {0}; + u16 out_len = sizeof(loopback_mode); + int ret; + + if (opcode == SSS_MGMT_MSG_SET_CMD) { + loopback_mode.mode = *mode; + loopback_mode.en = *enable; + } + loopback_mode.opcode = opcode; + loopback_mode.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE, + &loopback_mode, sizeof(loopback_mode), + &loopback_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &loopback_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s loopback mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSS_MGMT_MSG_SET_CMD ? 
"set" : "get", + ret, loopback_mode.head.state, out_len); + return -EIO; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) { + *enable = loopback_mode.en; + *mode = loopback_mode.mode; + } + + return 0; +} + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 lp_mode, u8 enable) +{ + if (SSSNIC_LOOP_MODE_IS_INVALID(lp_mode)) { + nic_err(nic_dev->dev_hdl, "Invalid loopback mode %u to set\n", + lp_mode); + return -EINVAL; + } + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_SET_CMD, &lp_mode, &enable); +} + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable) +{ + if (!nic_dev || !mode || !enable) + return -EINVAL; + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_GET_CMD, mode, + enable); +} + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type led_type, + enum sss_nic_mag_led_mode led_mode) +{ + struct sss_nic_mbx_set_led_cfg led_info = {0}; + u16 out_len = sizeof(led_info); + int ret; + + led_info.mode = led_mode; + led_info.type = led_type; + led_info.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LED_CFG, + &led_info, sizeof(led_info), &led_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &led_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to set led state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, led_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, + struct sss_nic_port_info *port_info, u16 channel) +{ + struct sss_nic_mbx_get_port_info mbx_port_info = {0}; + u16 out_len = sizeof(mbx_port_info); + int ret; + + if (!nic_dev || !port_info) + return -EINVAL; + + mbx_port_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_INFO, + &mbx_port_info, sizeof(mbx_port_info), + &mbx_port_info, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &mbx_port_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port info, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, mbx_port_info.head.state, out_len, channel); + return -EIO; + } + + port_info->advertised_mode = mbx_port_info.advertised_mode; + port_info->duplex = mbx_port_info.duplex; + port_info->autoneg_cap = mbx_port_info.an_support; + port_info->fec = mbx_port_info.fec; + port_info->autoneg_state = mbx_port_info.an_en; + port_info->port_type = mbx_port_info.wire_type; + port_info->supported_mode = mbx_port_info.supported_mode; + port_info->speed = mbx_port_info.speed; + + return 0; +} + +int sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings) +{ + struct sss_nic_mbx_mag_set_port_cfg port_cfg = {0}; + u16 out_len = sizeof(port_cfg); + int ret; + + port_cfg.autoneg = settings->autoneg; + port_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + port_cfg.fec = settings->fec; + port_cfg.config_bitmap = settings->valid_bitmap; + port_cfg.speed = settings->speed; + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_CFG, + &port_cfg, sizeof(port_cfg), &port_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set link settings, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_cfg.head.state, out_len); + return -EIO; + } + + return port_cfg.head.state; +} + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *out_state) +{ + struct 
sss_nic_mbx_get_link_state link_state = {0}; + u16 out_len = sizeof(link_state); + int ret; + + if (!nic_dev || !out_state) + return -EINVAL; + + link_state.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), &link_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &link_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to get link state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, link_state.head.state, out_len); + return -EIO; + } + + *out_state = link_state.status; + + return 0; +} + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, u8 state) +{ + struct sss_nic_mbx_get_link_state link_state = {0}; + u16 out_len = sizeof(link_state); + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + int ret; + + link_state.status = state; + link_state.port_id = sss_get_phy_port_id(nic_io->hwdev); + ret = sss_mbx_send_to_vf(nic_io->hwdev, vf_id, SSS_MOD_TYPE_SSSLINK, + SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), + &link_state, &out_len, 0, SSS_CHANNEL_NIC); + if (ret == SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC) { + sss_nic_dettach_vf(nic_io, vf_id); + nic_warn(nic_io->dev_hdl, "VF %d not initialize, need to disconnect it\n", id); + } else if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &link_state)) { + nic_err(nic_io->dev_hdl, + "Fail to send VF %d the link state change event, ret:%d, state:0x%x, out_len:0x%x\n", + id, ret, link_state.head.state, out_len); + } +} + +void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 state) +{ + struct sss_nic_vf_info *vf_info = NULL; + u16 vf_id; + + nic_io->link_status = state; + for (vf_id = 1; vf_id <= nic_io->max_vf_num; vf_id++) { + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->link_forced || !vf_info->attach) + continue; + sss_nic_notify_vf_link_state(nic_io, vf_id, state); + } +} + +static int sss_nic_get_vf_link_status_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *buf_in, u16 in_len, + void *buf_out, u16 *out_len) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_get_link_state *link_state = buf_out; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + bool link_up = vf_info_group[id].link_up; + bool link_forced = vf_info_group[id].link_forced; + + if (link_forced) + link_state->status = link_up ? 
SSSNIC_LINK_UP : SSSNIC_LINK_DOWN; + else + link_state->status = nic_io->link_status; + + link_state->head.state = SSS_MGMT_CMD_SUCCESS; + *out_len = sizeof(*link_state); + + return 0; +} + +static void sss_nic_get_link_info(struct sss_nic_io *nic_io, + const struct sss_nic_mbx_get_link_state *link_state, + struct sss_nic_event_link_info *link_info) +{ + struct sss_nic_port_info port_info = {0}; + int ret; + + /* link event reported only after set vport enable */ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + link_state->status == SSSNIC_LINK_DOWN) + return; + + ret = sss_nic_get_hw_port_info(nic_io->nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_warn(nic_io->dev_hdl, "Fail to get port info\n"); + return; + } + + link_info->valid = SSSNIC_LINK_INFO_VALID; + link_info->duplex = port_info.duplex; + link_info->port_type = port_info.port_type; + link_info->speed = port_info.speed; + link_info->autoneg_state = port_info.autoneg_state; + link_info->autoneg_cap = port_info.autoneg_cap; +} + +static void sss_nic_link_status_event_handler(struct sss_nic_io *nic_io, + void *buf_in, u16 in_len, + void *buf_out, u16 *out_len) +{ + struct sss_nic_mbx_get_link_state *in_link_state = buf_in; + struct sss_nic_mbx_get_link_state *out_link_state = buf_out; + struct sss_event_info event_info = {0}; + struct sss_nic_event_link_info *link_info = (void *)event_info.event_data; + + nic_info(nic_io->dev_hdl, "Link status report received, func_id: %u, status: %u\n", + sss_get_global_func_id(nic_io->hwdev), in_link_state->status); + + sss_update_link_stats(nic_io->hwdev, in_link_state->status); + + sss_nic_get_link_info(nic_io, in_link_state, link_info); + + event_info.type = (in_link_state->status == SSSNIC_LINK_DOWN) ? + SSSNIC_EVENT_LINK_DOWN : SSSNIC_EVENT_LINK_UP; + event_info.service = SSS_EVENT_SRV_NIC; + sss_do_event_callback(nic_io->hwdev, &event_info); + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return; + + *out_len = sizeof(*out_link_state); + out_link_state->head.state = SSS_MGMT_CMD_SUCCESS; + sss_nic_notify_all_vf_link_state(nic_io, in_link_state->status); +} + +static void sss_nic_cable_plug_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mag_wire_event *in_wire_event = in_buf; + struct sss_nic_mag_wire_event *out_wire_event = out_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + struct sss_event_info event_info = {0}; + struct sss_nic_port_module_event *module_event = (void *)event_info.event_data; + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_info = false; + routine_cmd->mpu_send_sfp_abs = false; + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); + + *out_size = sizeof(*out_wire_event); + out_wire_event->head.state = SSS_MGMT_CMD_SUCCESS; + + event_info.service = SSS_EVENT_SRV_NIC; + event_info.type = SSSNIC_EVENT_PORT_MODULE_EVENT; + module_event->type = (in_wire_event->status != SSNSIC_PORT_PRESENT) ? 
+ SSSNIC_PORT_MODULE_CABLE_PLUGGED : SSSNIC_PORT_MODULE_CABLE_UNPLUGGED; + + sss_do_event_callback(nic_io->hwdev, &event_info); +} + +static void sss_nic_port_sfp_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_get_xsfp_info *in_xsfp_info = in_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (in_size != sizeof(*in_xsfp_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*in_xsfp_info)); + return; + } + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_info = true; + memcpy(&routine_cmd->std_sfp_info, in_xsfp_info, sizeof(*in_xsfp_info)); + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); +} + +static void sss_nic_port_sfp_absent_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_get_xsfp_present *in_xsfp_present = in_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (in_size != sizeof(*in_xsfp_present)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*in_xsfp_present)); + return; + } + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_abs = true; + memcpy(&routine_cmd->abs, in_xsfp_present, sizeof(*in_xsfp_present)); + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); +} + +bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev) +{ + int ret; + bool sfp_abs_state; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + u8 port_id = sss_get_phy_port_id(nic_dev->hwdev); + struct sss_nic_mbx_get_xsfp_present xsfp_present = {0}; + u16 out_len = sizeof(xsfp_present); + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_abs) { + if (routine_cmd->abs.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return true; + } + + sfp_abs_state = (bool)routine_cmd->abs.abs_status; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return sfp_abs_state; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_present.port_id = port_id; + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + &xsfp_present, sizeof(xsfp_present), &xsfp_present, + &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &xsfp_present)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp absent status, ret: %d, status: 0x%x, out_len: 0x%x\n", + port_id, ret, xsfp_present.head.state, out_len); + return true; + } + + return !!xsfp_present.abs_status; +} + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info) +{ + int ret; + u16 out_len = sizeof(*xsfp_info); + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (!nic_dev || !xsfp_info) + return -EINVAL; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + memcpy(xsfp_info, &routine_cmd->std_sfp_info, sizeof(*xsfp_info)); + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_info->port_id = sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + xsfp_info, 
sizeof(*xsfp_info), xsfp_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, xsfp_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp eeprom information, ret: %d, status: 0x%x, out_len: 0x%x\n", + sss_get_phy_port_id(nic_dev->hwdev), ret, + xsfp_info->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len) +{ + struct sss_nic_mbx_get_xsfp_info xsfp_info = {0}; + int ret; + + if (!nic_dev || !data) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + ret = sss_nic_get_sfp_info(nic_dev, &xsfp_info); + if (ret != 0) + return ret; + + memcpy(data, xsfp_info.sfp_info, len); + + return 0; +} + +int sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE]; + int ret; + + if (!nic_dev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + *sfp_type_ext = routine_cmd->std_sfp_info.sfp_info[1]; + *sfp_type = routine_cmd->std_sfp_info.sfp_info[0]; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, SSSNIC_STD_SFP_INFO_MAX_SIZE); + if (ret != 0) + return ret; + + *sfp_type = sfp_data[0]; + *sfp_type_ext = sfp_data[1]; + + return 0; +} + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status state) +{ + int ret; + struct sss_nic_mbx_set_link_follow link_follow = {0}; + u16 out_len = sizeof(link_follow); + + link_follow.function_id = sss_get_global_func_id(nic_dev->hwdev); + link_follow.follow = state; + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW, + &link_follow, sizeof(link_follow), + &link_follow, &out_len); + if ((link_follow.head.state != SSS_MGMT_CMD_UNSUPPORTED && link_follow.head.state != 0) || + ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set link status follow, ret: %d, state: 0x%x, out size: 0x%x\n", + ret, link_follow.head.state, out_len); + return -EFAULT; + } + + return link_follow.head.state; +} + +static const struct sss_nic_vf_msg_handler g_sss_nic_vf_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .msg_handler = sss_nic_get_vf_link_status_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_vf_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_vf_mag_cmd_proc[i].opcode == opcode) + return &g_sss_nic_vf_mag_cmd_proc[i]; + + return NULL; +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + const struct sss_nic_vf_msg_handler *handler = NULL; + struct sss_nic_io *nic_io; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_mag_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, + in_buf, in_size, out_buf, 
out_size); + + nic_warn(nic_io->dev_hdl, "NO function found for mag cmd: %u received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +static struct nic_event_handler g_sss_nic_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .event_handler = sss_nic_link_status_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_WIRE_EVENT, + .event_handler = sss_nic_cable_plug_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + .event_handler = sss_nic_port_sfp_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + .event_handler = sss_nic_port_sfp_absent_event_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_mag_cmd_proc[i].opcode == opcode) + return &g_sss_nic_mag_cmd_proc[i]; + + return NULL; +} + +static int _sss_nic_mag_event_handler(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + const struct nic_event_handler *handler = NULL; + struct sss_nic_io *nic_io = NULL; + struct sss_mgmt_msg_head *out_msg_head = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + *out_size = 0; + + handler = sss_nic_get_mag_cmd_proc(cmd); + if (handler) { + handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size); + return 0; + } + + out_msg_head = out_buf; + out_msg_head->state = SSS_MGMT_CMD_UNSUPPORTED; + *out_size = sizeof(*out_msg_head); + + nic_warn(nic_io->dev_hdl, "Invalid mag event cmd: %u\n", cmd); + + return 0; +} + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +/* pf/ppf handler mgmt cpu report ssslink event */ +void sss_nic_pf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +static int _sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_mag_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_SSSLINK, cmd, + in_buf, in_size, out_buf, out_size, 0, channel); + + return sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_SSSLINK, + cmd, in_buf, in_size, out_buf, out_size, 0, channel); +} + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, SSS_CHANNEL_NIC); +} + +static int sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, channel); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h new file mode 100644 index 00000000000000..ef112925cf5054 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_MAG_CFG_H +#define SSS_NIC_MAG_CFG_H + +#include + +#include 
"sss_nic_cfg_mag_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_dev_define.h" + +enum port_module_event_type { + SSSNIC_PORT_MODULE_CABLE_PLUGGED, + SSSNIC_PORT_MODULE_CABLE_UNPLUGGED, + SSSNIC_PORT_MODULE_LINK_ERR, + SSSNIC_PORT_MODULE_MAX_EVENT, +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +struct sss_nic_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel); + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *link_state); + +void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 link_status); + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, struct sss_nic_port_info *port_info, + u16 channel); + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats); + +int sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings); + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type type, + enum sss_nic_mag_led_mode mode); + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 mode, u8 enable); + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable); + +int sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext); +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len); + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status status); + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, + u16 vf_id, u8 link_status); + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +void sss_nic_pf_mag_event_handler(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info); + +bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev); + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c new file mode 100644 index 00000000000000..3c432ceba1354b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c @@ -0,0 +1,1073 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx_reset.h" +#include "sss_nic_rss.h" +#include "sss_nic_dcb.h" +#include "sss_nic_ethtool.h" +#include "sss_nic_filter.h" +#include "sss_nic_netdev_ops.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ntuple.h" +#include "sss_nic_event.h" +#include "sss_tool_nic_func.h" + 
+#define DEFAULT_POLL_BUDGET 64 +static u32 poll_budget = DEFAULT_POLL_BUDGET; +module_param(poll_budget, uint, 0444); +MODULE_PARM_DESC(poll_budget, "Number of packets for NAPI budget (default=64)"); + +#define SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 25 +#define SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +static u8 msix_pending_limit = SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(msix_pending_limit, byte, 0444); +MODULE_PARM_DESC(msix_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); + +static u8 msix_coalesc_timer = + SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(msix_coalesc_timer, byte, 0444); +MODULE_PARM_DESC(msix_coalesc_timer, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=25)"); + +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff_size = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff_size, ushort, 0444); +MODULE_PARM_DESC(rx_buff_size, "Set rx buffer size in KB, must be a power of 2 in range 2 - 16 (default=2KB)"); + +static u32 rx_poll_wqe = 256; +module_param(rx_poll_wqe, uint, 0444); +MODULE_PARM_DESC(rx_poll_wqe, "Number of WQEs for rx poll (default=256)"); + +static u8 link_follow_status = SSSNIC_LINK_FOLLOW_STATUS_MAX; +module_param(link_follow_status, byte, 0444); +MODULE_PARM_DESC(link_follow_status, "Set link follow port status (0=default,1=follow,2=separate,3=unset)"); + +#define SSSNIC_DEV_WQ_NAME "sssnic_dev_wq" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) + +#define QID_MASKED(qid, nic_dev) ((qid) & ((nic_dev)->qp_num - 1)) +#define WATCHDOG_TIMEOUT 5 + +#define SSSNIC_SQ_DEPTH 1024 +#define SSSNIC_RQ_DEPTH 1024 + +enum sss_nic_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, + RX_BUFF_VALID_16KB = 16, +}; + +#define CONVERT_UNIT 1024 +#define RX_BUFF_TO_BYTES(size) ((u16)((size) * CONVERT_UNIT)) +#define RX_BUFF_NUM_PER_PAGE 2 +#define RX_BUFF_TO_DMA_SIZE(rx_buff_len) (RX_BUFF_NUM_PER_PAGE * (rx_buff_len)) +#define DMA_SIZE_TO_PAGE_NUM(buff_size) ((buff_size) / PAGE_SIZE) +#define PAGE_NUM_TO_ORDER(page_num) ((page_num) > 0 ?
ilog2(page_num) : 0) +#define BUFF_SIZE_TO_PAGE_ORDER(buff_size) PAGE_NUM_TO_ORDER(DMA_SIZE_TO_PAGE_NUM(buff_size)) + +#define POLL_BUDGET_IS_VALID(budget) ((budget) <= SSSNIC_MAX_RX_QUEUE_DEPTH) + +#define SSSNIC_NETDEV_DEFAULT_FEATURE (NETIF_F_SG | NETIF_F_HIGHDMA) + +#define SSSNIC_LP_PKT_LEN 60 + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + +#define SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 +#define SSSNIC_VLAN_CLEAR_OFFLOAD (~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO)) + +#define SSSNIC_DRV_DESC "Intelligent Network Interface Card Driver" + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr); +typedef void (*sss_nic_port_module_event_handler_t)(struct sss_nic_dev *nic_dev, void *event_data); + +static DEFINE_MUTEX(g_netdev_notifier_mutex); +static int g_netdev_notifier_ref_cnt; + +typedef void (*sss_nic_event_handler_t)(struct sss_nic_dev *nic_dev, struct sss_event_info *event); + +static struct notifier_block g_netdev_notifier = { + .notifier_call = sss_nic_netdev_event_handler, +}; + +static void sss_nic_register_notifier(struct sss_nic_dev *nic_dev) +{ + int ret; + + mutex_lock(&g_netdev_notifier_mutex); + g_netdev_notifier_ref_cnt++; + if (g_netdev_notifier_ref_cnt == 1) { + ret = register_netdevice_notifier(&g_netdev_notifier); + if (ret != 0) { + nic_info(nic_dev->dev_hdl, + "Fail to register netdevice notifier, ret: %d\n", ret); + g_netdev_notifier_ref_cnt--; + } + } + mutex_unlock(&g_netdev_notifier_mutex); +} + +static void sss_nic_unregister_notifier(struct sss_nic_dev *nic_dev) +{ + mutex_lock(&g_netdev_notifier_mutex); + if (g_netdev_notifier_ref_cnt == 1) + unregister_netdevice_notifier(&g_netdev_notifier); + + if (g_netdev_notifier_ref_cnt > 0) + g_netdev_notifier_ref_cnt--; + mutex_unlock(&g_netdev_notifier_mutex); +} + +#if IS_ENABLED(CONFIG_VLAN_8021Q) +static u16 sss_nic_get_vlan_depth(struct net_device *dev) +{ + u16 vlan_depth = 0; + struct net_device *vlan_dev = dev; + + do { + vlan_depth++; + vlan_dev = vlan_dev_priv(vlan_dev)->real_dev; + } while (is_vlan_dev(vlan_dev)); + + return vlan_depth; +} + +static void sss_nic_clear_netdev_vlan_offload(struct net_device *dev, u16 vlan_depth) +{ + if (vlan_depth == SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + dev->vlan_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } else if (vlan_depth > SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { +#ifdef HAVE_NDO_SET_FEATURES + dev->hw_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; +#endif + dev->features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } +} +#endif + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ +#if IS_ENABLED(CONFIG_VLAN_8021Q) + u16 vlan_depth; +#endif + struct net_device *real_dev = NULL; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (!is_vlan_dev(dev)) + return NOTIFY_DONE; + + if (event != NETDEV_REGISTER) + return NOTIFY_DONE; + + dev_hold(dev); + + real_dev = vlan_dev_real_dev(dev); + if (!sss_nic_is_netdev_ops_match(real_dev)) + goto out; + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + vlan_depth = sss_nic_get_vlan_depth(dev); + sss_nic_clear_netdev_vlan_offload(dev, vlan_depth); +#endif +out: + dev_put(dev); + + return NOTIFY_DONE; +} +#endif + +static netdev_features_t sss_nic_default_cso_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_CSUM(nic_dev->nic_io)) + feature |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if 
(SSSNIC_SUPPORT_SCTP_CRC(nic_dev->nic_io)) + feature |= NETIF_F_SCTP_CRC; + + return feature; +} + +static netdev_features_t sss_nic_default_gso_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_TSO(nic_dev->nic_io)) + feature |= NETIF_F_TSO | NETIF_F_TSO6; +#ifdef HAVE_ENCAPSULATION_TSO + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) + feature |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; +#endif /* HAVE_ENCAPSULATION_TSO */ + + return feature; +} + +static netdev_features_t sss_nic_default_vlan_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_RXVLAN_FILTER(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + feature |= NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + feature |= NETIF_F_HW_VLAN_FILTER; +#endif + } + + if (SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_TX) + feature |= NETIF_F_HW_VLAN_CTAG_TX; +#elif defined(NETIF_F_HW_VLAN_TX) + feature |= NETIF_F_HW_VLAN_TX; +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + feature |= NETIF_F_HW_VLAN_CTAG_RX; +#elif defined(NETIF_F_HW_VLAN_RX) + feature |= NETIF_F_HW_VLAN_RX; +#endif + } + + return feature; +} + +static netdev_features_t sss_nic_default_lro_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_LRO(nic_dev->nic_io)) + feature = NETIF_F_LRO; + + return feature; +} + +static void sss_nic_init_netdev_hw_feature(struct sss_nic_dev *nic_dev, + netdev_features_t lro_feature) +{ + struct net_device *netdev = nic_dev->netdev; + netdev_features_t hw_features = 0; + + hw_features = netdev->hw_features; + + hw_features |= netdev->features | lro_feature; + + netdev->hw_features = hw_features; +} + +static void sss_nic_init_netdev_hw_enc_feature(struct sss_nic_dev *nic_dev, + netdev_features_t cso_feature, + netdev_features_t gso_feature) +{ + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_ENCAPSULATION_CSUM + netdev->hw_enc_features |= SSSNIC_NETDEV_DEFAULT_FEATURE; + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) { + netdev->hw_enc_features |= cso_feature; +#ifdef HAVE_ENCAPSULATION_TSO + netdev->hw_enc_features |= gso_feature | NETIF_F_TSO_ECN; +#endif /* HAVE_ENCAPSULATION_TSO */ + } +#endif /* HAVE_ENCAPSULATION_CSUM */ +} + +static void sss_nic_init_netdev_feature(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + netdev_features_t cso_feature = 0; + netdev_features_t gso_feature = 0; + netdev_features_t vlan_feature = 0; + netdev_features_t lro_feature = 0; + + cso_feature = sss_nic_default_cso_feature(nic_dev); + gso_feature = sss_nic_default_gso_feature(nic_dev); + vlan_feature = sss_nic_default_vlan_feature(nic_dev); + lro_feature = sss_nic_default_lro_feature(nic_dev); + + netdev->features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature | vlan_feature; + netdev->vlan_features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature; + + sss_nic_init_netdev_hw_feature(nic_dev, lro_feature); + sss_nic_init_netdev_hw_enc_feature(nic_dev, cso_feature, gso_feature); + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +} + +static void sss_nic_init_intr_coal_param(struct sss_nic_intr_coal_info *intr_coal, u16 max_qp) +{ + u16 i; + + for (i = 0; i < max_qp; i++) { + intr_coal[i].pkt_rate_low = SSSNIC_RX_RATE_LOW; + intr_coal[i].pkt_rate_high = SSSNIC_RX_RATE_HIGH; + intr_coal[i].rx_usecs_low = SSSNIC_RX_COAL_TIME_LOW; + 
intr_coal[i].rx_usecs_high = SSSNIC_RX_COAL_TIME_HIGH; + intr_coal[i].rx_pending_limt_low = SSSNIC_RX_PENDING_LIMIT_LOW; + intr_coal[i].rx_pending_limt_high = SSSNIC_RX_PENDING_LIMIT_HIGH; + intr_coal[i].pending_limt = msix_pending_limit; + intr_coal[i].coalesce_timer = msix_coalesc_timer; + intr_coal[i].resend_timer = SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + } +} + +static int sss_nic_init_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + u64 coalesce_size; + + coalesce_size = sizeof(*nic_dev->coal_info) * nic_dev->max_qp_num; + nic_dev->coal_info = kzalloc(coalesce_size, GFP_KERNEL); + if (!nic_dev->coal_info) + return -ENOMEM; + + sss_nic_init_intr_coal_param(nic_dev->coal_info, nic_dev->max_qp_num); + + if (test_bit(SSSNIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->use_adaptive_rx_coalesce = 1; + else + nic_dev->use_adaptive_rx_coalesce = 0; + + return 0; +} + +static void sss_nic_deinit_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->coal_info); + nic_dev->coal_info = NULL; +} + +static int sss_nic_alloc_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + u8 *loop_test_rx_buf = NULL; + + loop_test_rx_buf = kvmalloc(SSSNIC_LP_PKT_CNT * SSSNIC_LP_PKT_LEN, GFP_KERNEL); + if (!loop_test_rx_buf) + return -ENOMEM; + + nic_dev->loop_test_rx_buf = loop_test_rx_buf; + nic_dev->loop_pkt_len = SSSNIC_LP_PKT_LEN; + + return 0; +} + +static void sss_nic_free_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + kvfree(nic_dev->loop_test_rx_buf); + nic_dev->loop_test_rx_buf = NULL; +} + +static void sss_nic_dev_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_free_lb_test_buf(nic_dev); + + sss_nic_deinit_intr_coalesce(nic_dev); + + sss_nic_free_rq_desc_group(nic_dev); + + sss_nic_free_sq_desc_group(nic_dev); + + sss_nic_clean_mac_list_filter(nic_dev); + + sss_nic_del_mac(nic_dev, nic_dev->netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + + sss_nic_free_rss_key(nic_dev); + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) + sss_nic_set_hw_dcb_state(nic_dev, + SSSNIC_MBX_OPCODE_SET_DCB_STATE, SSSNIC_DCB_STATE_DISABLE); +} + +static int sss_nic_init_mac_addr(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_get_default_mac(nic_dev, (u8 *)(netdev->dev_addr)); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get MAC address\n"); + return ret; + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + nic_info(nic_dev->dev_hdl, + "Invalid default mac address %pM\n", netdev->dev_addr); + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + nic_err(nic_dev->dev_hdl, "Invalid default MAC address\n"); + return -EIO; + } + + eth_hw_addr_random(netdev); + nic_info(nic_dev->dev_hdl, + "Use random mac address %pM\n", netdev->dev_addr); + } + + ret = sss_nic_set_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + if (ret != 0 && ret != SSSNIC_PF_SET_VF_ALREADY) { + /* If it is a VF device, it is possible that the MAC address has been set by PF, + * and this situation is legal. 
+ */ + nic_err(nic_dev->dev_hdl, "Fail to set default MAC\n"); + return ret; + } + + return 0; +} + +static void sss_nic_set_mtu_range(struct net_device *netdev) +{ + /* MTU range: 384 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif + +#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->extended->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif +} + +static int sss_nic_dev_init(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + + /* get nic cap from hw */ + sss_get_nic_capability(nic_dev->hwdev, &nic_dev->nic_svc_cap); + + ret = sss_nic_dcb_init(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init dcb\n"); + return -EFAULT; + } + + sss_nic_try_to_enable_rss(nic_dev); + + ret = sss_nic_init_mac_addr(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init mac address\n"); + goto init_mac_addr_err; + } + + sss_nic_set_mtu_range(netdev); + + ret = sss_nic_alloc_sq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init sq\n"); + goto init_sq_err; + } + + ret = sss_nic_alloc_rq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init rq\n"); + goto init_rq_err; + } + + ret = sss_nic_init_intr_coalesce(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init interrupt and coalesce\n"); + goto init_intr_coalesce_err; + } + + ret = sss_nic_alloc_lb_test_buf(nic_dev); + if (ret) { + nic_err(nic_dev->dev_hdl, "Fail to alloc loopback test buf\n"); + goto alloc_lb_test_buf_err; + } + + return 0; + +alloc_lb_test_buf_err: + sss_nic_deinit_intr_coalesce(nic_dev); + +init_intr_coalesce_err: + sss_nic_free_rq_desc_group(nic_dev); + +init_rq_err: + sss_nic_free_sq_desc_group(nic_dev); + +init_sq_err: + sss_nic_del_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + +init_mac_addr_err: + sss_nic_free_rss_key(nic_dev); + + return ret; +} + +static void sss_nic_init_netdev_ops(struct sss_nic_dev *nic_dev) +{ + sss_nic_set_netdev_ops(nic_dev); + + sss_nic_set_ethtool_ops(nic_dev); + + nic_dev->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; +} + +static void sss_nic_validate_parameters(struct pci_dev *pdev) +{ + u16 i; + u16 valid_rx_buff_len_list[] = { + RX_BUFF_VALID_2KB, RX_BUFF_VALID_4KB, + RX_BUFF_VALID_8KB, RX_BUFF_VALID_16KB + }; + + if (!POLL_BUDGET_IS_VALID(poll_budget)) + poll_budget = DEFAULT_POLL_BUDGET; + + for (i = 0; i < ARRAY_LEN(valid_rx_buff_len_list); i++) { + if (rx_buff_size == valid_rx_buff_len_list[i]) + return; + } + + rx_buff_size = DEFAULT_RX_BUFF_LEN; +} + +static void sss_nic_periodic_work_handler(struct work_struct *work) +{ + struct delayed_work *delay_work = to_delayed_work(work); + struct sss_nic_dev *nic_dev = container_of(delay_work, struct sss_nic_dev, routine_work); + + if (SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT)) + sss_fault_event_report(nic_dev->hwdev, SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_LEVEL_SERIOUS_FLR); + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); +} + +static void sss_nic_dev_resource_destroy(struct sss_nic_dev *nic_dev) +{ + destroy_workqueue(nic_dev->workq); + kfree(nic_dev->vlan_bitmap); +} + +static int sss_nic_dev_params_init(struct net_device *netdev, + struct sss_hal_dev *uld_dev) +{ + struct pci_dev *pdev = uld_dev->pdev; + struct sss_nic_dev *nic_dev; + + nic_dev = (struct sss_nic_dev 
*)netdev_priv(netdev); + nic_dev->hwdev = uld_dev->hwdev; + nic_dev->netdev = netdev; + nic_dev->pdev = pdev; + nic_dev->dev_hdl = &pdev->dev; + nic_dev->uld_dev = uld_dev; + nic_dev->rx_buff_len = RX_BUFF_TO_BYTES(rx_buff_size); + nic_dev->rx_dma_buff_size = RX_BUFF_TO_DMA_SIZE(nic_dev->rx_buff_len); + nic_dev->page_order = BUFF_SIZE_TO_PAGE_ORDER(nic_dev->rx_dma_buff_size); + nic_dev->poll_budget = (int)poll_budget; + nic_dev->rx_poll_wqe = rx_poll_wqe; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->qp_res.sq_depth = SSSNIC_SQ_DEPTH; + nic_dev->qp_res.rq_depth = SSSNIC_RQ_DEPTH; + nic_dev->max_qp_num = sss_get_max_sq_num(nic_dev->hwdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + mutex_init(&nic_dev->qp_mutex); + sema_init(&nic_dev->port_sem, 1); + + nic_dev->vlan_bitmap = kzalloc(SSSNIC_VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (!nic_dev->vlan_bitmap) + return -ENOMEM; + + nic_dev->workq = create_singlethread_workqueue(SSSNIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + nic_err(&pdev->dev, "Fail to initialize nic workqueue\n"); + kfree(nic_dev->vlan_bitmap); + return -ENOMEM; + } + + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_node_info.tcam_node_list); + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_list); + INIT_LIST_HEAD(&nic_dev->rx_rule.rule_list); + + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + + INIT_DELAYED_WORK(&nic_dev->routine_work, sss_nic_periodic_work_handler); + INIT_DELAYED_WORK(&nic_dev->rq_watchdog_work, sss_nic_rq_watchdog_handler); + INIT_WORK(&nic_dev->rx_mode_work, sss_nic_set_rx_mode_work); + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTR_ADAPT); + + return 0; +} + +static void sss_nic_set_default_link_follow(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return; + + if (link_follow_status >= SSSNIC_LINK_FOLLOW_STATUS_MAX) + return; + + ret = sss_nic_set_link_follow_state(nic_dev, link_follow_status); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + nic_warn(nic_dev->dev_hdl, + "Firmware doesn't support to set link status follow port status\n"); +} + +static int sss_nic_set_default_feature_to_hw(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_set_default_link_follow(nic_dev); + + ret = sss_nic_set_feature_to_hw(nic_dev->nic_io); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to set nic feature\n"); + return ret; + } + + /* enable all features in netdev->features */ + ret = sss_nic_enable_netdev_feature(nic_dev); + if (ret != 0) { + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); + nic_err(nic_dev->dev_hdl, "Fail to set netdev feature\n"); + return ret; + } + + if (SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + + return 0; +} + +static struct net_device *sss_nic_alloc_netdev(void *hwdev) +{ + u16 max_qps = sss_get_max_sq_num(hwdev); + + return alloc_etherdev_mq(sizeof(struct sss_nic_dev), max_qps); +} + +static void sss_nic_free_netdev(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->vlan_bitmap); + free_netdev(nic_dev->netdev); +} + +static int sss_nic_reset_function(void *hwdev) +{ + u16 glb_func_id = sss_get_global_func_id(hwdev); + + return sss_chip_reset_function(hwdev, glb_func_id, SSS_NIC_RESET, SSS_CHANNEL_NIC); +} + +static int sss_nic_init_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_init_netdev_ops(nic_dev); + + sss_nic_init_netdev_feature(nic_dev); + + ret = sss_nic_set_default_feature_to_hw(nic_dev); + if (ret != 0) + return ret; + + return 0; +} + +static 
void sss_nic_deinit_netdev(struct sss_nic_dev *nic_dev) +{ + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); +} + +static int sss_nic_register_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_register_notifier(nic_dev); +#endif + + ret = register_netdev(netdev); + if (ret != 0) { +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + nic_err(nic_dev->dev_hdl, "Fail to register netdev\n"); + return -ENOMEM; + } + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); + + netif_carrier_off(netdev); + + return 0; +} + +static void sss_nic_unregister_netdev(struct sss_nic_dev *nic_dev) +{ + unregister_netdev(nic_dev->netdev); + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + cancel_delayed_work_sync(&nic_dev->routine_work); + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_work_sync(&nic_dev->rx_mode_work); + destroy_workqueue(nic_dev->workq); +} + +static int sss_nic_probe(struct sss_hal_dev *hal_dev, void **uld_dev, + char *uld_dev_name) +{ + struct pci_dev *pdev = hal_dev->pdev; + void *hwdev = hal_dev->hwdev; + struct sss_nic_dev *nic_dev = NULL; + struct net_device *netdev = NULL; + int ret; + + if (!sss_support_nic(hwdev)) { + nic_info(&pdev->dev, "Hw don't support nic\n"); + return 0; + } + + nic_info(&pdev->dev, "NIC probe begin\n"); + + sss_nic_validate_parameters(pdev); + + ret = sss_nic_reset_function(hwdev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to reset function\n"); + goto err_out; + } + + netdev = sss_nic_alloc_netdev(hwdev); + if (!netdev) { + nic_err(&pdev->dev, "Fail to allocate net device\n"); + ret = -ENOMEM; + goto err_out; + } + + ret = sss_nic_dev_params_init(netdev, hal_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic_dev params\n"); + goto nic_dev_params_init_err; + } + + nic_dev = (struct sss_nic_dev *)netdev_priv(netdev); + + ret = sss_nic_io_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic io\n"); + goto nic_io_init_err; + } + + ret = sss_nic_dev_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic dev\n"); + goto nic_dev_init_err; + } + + ret = sss_nic_init_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init net device\n"); + goto init_netdev_err; + } + + ret = sss_nic_register_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register net device\n"); + goto register_netdev_err; + } + + *uld_dev = nic_dev; + nic_info(&pdev->dev, "Success to probe NIC\n"); + + return 0; + +register_netdev_err: + sss_nic_deinit_netdev(nic_dev); + +init_netdev_err: + sss_nic_dev_deinit(nic_dev); + +nic_dev_init_err: + sss_nic_io_deinit(nic_dev); + +nic_io_init_err: + sss_nic_dev_resource_destroy(nic_dev); + +nic_dev_params_init_err: + free_netdev(netdev); + +err_out: + nic_err(&pdev->dev, "Fail to run NIC probe\n"); + + return ret; +} + +static void sss_nic_remove(struct sss_hal_dev *hal_dev, void *adapter) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !sss_support_nic(hal_dev->hwdev)) + return; + + nic_info(&hal_dev->pdev->dev, "NIC remove begin\n"); + + sss_nic_unregister_netdev(nic_dev); + + sss_nic_flush_tcam(nic_dev); + + sss_nic_deinit_netdev(nic_dev); + + sss_nic_dev_deinit(nic_dev); + + sss_nic_io_deinit(nic_dev); + + sss_nic_free_netdev(nic_dev); + + nic_info(&hal_dev->pdev->dev, "Success to remove NIC\n"); +} + +static void 
sss_nic_sriov_state_change(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_sriov_state_info *info = (void *)event->event_data; + + if (!info->enable) + sss_nic_clear_all_vf_info(nic_dev->nic_io); +} + +static void sss_nic_port_module_cable_plug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable plugged\n"); +} + +static void sss_nic_port_module_cable_unplug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable unplugged\n"); +} + +static void sss_nic_port_module_link_err(struct sss_nic_dev *nic_dev, void *event_data) +{ + struct sss_nic_port_module_event *port_event = event_data; + enum link_err_type err_type = port_event->err_type; + + nicif_info(nic_dev, link, nic_dev->netdev, + "Fail to link, err_type: 0x%x\n", err_type); +} + +static void sss_nic_port_module_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_nic_port_module_event *port_event = (void *)event->event_data; + enum port_module_event_type type = port_event->type; + + sss_nic_port_module_event_handler_t handler[SSSNIC_PORT_MODULE_MAX_EVENT] = { + sss_nic_port_module_cable_plug, + sss_nic_port_module_cable_unplug, + sss_nic_port_module_link_err, + }; + + if (type >= SSSNIC_PORT_MODULE_MAX_EVENT) { + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + return; + } + + if (handler[type]) + handler[type](nic_dev, event->event_data); +} + +static void sss_nic_link_down(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (!netif_carrier_ok(netdev)) + return; + + netif_carrier_off(netdev); + nic_dev->link_status = false; + nicif_info(nic_dev, link, netdev, "Link is down\n"); +} + +static void sss_nic_link_up(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (netif_carrier_ok(netdev)) + return; + + netif_carrier_on(netdev); + nic_dev->link_status = true; + + nicif_info(nic_dev, link, netdev, "Link is up\n"); +} + +static void sss_nic_comm_fail_envet_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_fault_event *fault = (void *)event->event_data; + + if (fault->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && + fault->info.chip.func_id == sss_get_global_func_id(nic_dev->hwdev)) + sss_nic_link_down(nic_dev, event); +} + +static void sss_nic_event_handler(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSSNIC_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_up, + sss_nic_port_module_event_handler, + NULL, + }; + + if (event->type >= SSSNIC_EVENT_MAX) + return; + + if (handler[event->type]) + handler[event->type](nic_dev, event); +} + +static void sss_nic_comm_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSS_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_down, + sss_nic_comm_fail_envet_handler, + sss_nic_sriov_state_change, + NULL, + sss_nic_link_down, + }; + + if (event->type >= SSS_EVENT_MAX) + return; + + if 
(handler[event->type]) + handler[event->type](nic_dev, event); +} + +static void sss_nic_event(struct sss_hal_dev *uld_dev, void *adapter, + struct sss_event_info *event) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !event || !sss_support_nic(uld_dev->hwdev)) + return; + + if (event->service == SSS_EVENT_SRV_NIC) { + sss_nic_event_handler(nic_dev, event); + return; + } + + if (event->service == SSS_EVENT_SRV_COMM) { + sss_nic_comm_event_handler(nic_dev, event); + return; + } +} + +struct sss_uld_info g_nic_uld_info = { + .probe = sss_nic_probe, + .remove = sss_nic_remove, + .suspend = NULL, + .resume = NULL, + .event = sss_nic_event, + .ioctl = sss_tool_ioctl, +}; + +static __init int sss_nic_init(void) +{ + int ret; + + pr_info("%s - version %s\n", SSSNIC_DRV_DESC, + SSSNIC_DRV_VERSION); + + ret = sss_init_pci(); + if (ret) { + pr_err("SDK init failed.\n"); + return ret; + } + + ret = sss_register_uld(SSS_SERVICE_TYPE_NIC, &g_nic_uld_info); + if (ret != 0) { + pr_err("Fail to register sss_nic uld\n"); + sss_exit_pci(); + return ret; + } + + return 0; +} + +static __exit void sss_nic_exit(void) +{ + sss_unregister_uld(SSS_SERVICE_TYPE_NIC); + sss_exit_pci(); +} + +#ifndef _LLT_TEST_ +module_init(sss_nic_init); +module_exit(sss_nic_exit); +#endif + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSSNIC_DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c new file mode 100644 index 00000000000000..4ffc5826babaf2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c @@ -0,0 +1,800 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define SSSNIC_MAX_VLAN_ID 4094 +#define SSSNIC_MAX_QOS_NUM 7 + +#define SSSNIC_TX_RATE_TABLE_FULL 12 + +static int sss_nic_ndo_open(struct net_device *netdev) +{ + int ret; + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already open\n"); + return 0; + } + + ret = sss_nic_io_resource_init(nic_dev->nic_io); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init nic io resource\n"); + return ret; + } + + ret = sss_nic_dev_resource_init(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp resource\n"); + goto init_dev_res_err; + } + + ret = sss_nic_qp_resource_init(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto alloc_qp_res_err; + + ret = sss_nic_open_dev(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto open_chan_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_err; + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP); + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); + + return 0; + +vport_err: + sss_nic_close_dev(nic_dev, 
&qp_info); + +open_chan_err: + sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +alloc_qp_res_err: + sss_nic_dev_resource_deinit(nic_dev); + +init_dev_res_err: + sss_nic_io_resource_deinit(nic_dev->nic_io); + + return ret; +} + +static int sss_nic_ndo_stop(struct net_device *netdev) +{ + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already close\n"); + return 0; + } + + if (SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) + goto out; + + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &qp_info); + sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +out: + sss_nic_io_resource_deinit(nic_dev->nic_io); + sss_nic_dev_resource_deinit(nic_dev); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#endif +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + u8 cos; + u8 qp_num; + u16 sq_num; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + return sss_nic_select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); + + sq_num = +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) + netdev_pick_tx(netdev, skb, NULL); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + fallback(netdev, skb, sb_dev); +#else + fallback(netdev, skb); +#endif +#else + skb_tx_hash(netdev, skb); +#endif + + if (likely(!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE))) + return sq_num; + + cos = sss_nic_get_cos(nic_dev, skb); + + qp_num = (nic_dev->hw_dcb_cfg.cos_qp_num[cos] != 0) ? 
+ sq_num % nic_dev->hw_dcb_cfg.cos_qp_num[cos] : 0; + sq_num = nic_dev->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; + + return sq_num; +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif + +#else /* !HAVE_NDO_GET_STATS64 */ +static struct net_device_stats *sss_nic_ndo_get_stats(struct net_device *netdev) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifndef HAVE_NDO_GET_STATS64 +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *stats = &netdev->stats; +#else + struct net_device_stats *stats = &nic_dev->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +#endif /* HAVE_NDO_GET_STATS64 */ + + sss_nic_get_tx_stats(nic_dev, stats); + sss_nic_get_rx_stats(nic_dev, stats); + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void sss_nic_ndo_tx_timeout(struct net_device *netdev, + unsigned int __maybe_unused queue) +#else +static void sss_nic_ndo_tx_timeout(struct net_device *netdev) +#endif +{ + struct sss_nic_io_queue *sq = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 sw_pi; + u32 hw_ci; + u8 qid; + + SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, qid))) + continue; + + sq = nic_dev->sq_desc_group[qid].sq; + sw_pi = sss_nic_get_sq_local_pi(sq); + hw_ci = sss_nic_get_sq_hw_ci(sq); + nicif_info(nic_dev, drv, netdev, + "Sq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi state: 0x%lx.\n", + qid, sw_pi, hw_ci, sss_nic_get_sq_local_ci(sq), + nic_dev->qp_res.irq_cfg[qid].napi.state); + + if (sw_pi != hw_ci) { + SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT); + return; + } + } +} + +static int sss_nic_ndo_change_mtu(struct net_device *netdev, int new_mtu) +{ + int ret = 0; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_max_mtu; + + if (SSSNIC_IS_XDP_ENABLE(nic_dev)) { + xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + if (new_mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, netdev, + "Fail to change mtu to %d, max mtu is %d\n", + new_mtu, xdp_max_mtu); + return -EINVAL; + } + } +#endif + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)new_mtu); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to change mtu to %d\n", + new_mtu); + return ret; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to change mtu from %u to %d\n", + netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + return 0; +} + +static int sss_nic_ndo_set_mac_address(struct net_device *netdev, void *mac_addr) +{ + int ret = 0; + struct sockaddr *set_addr = mac_addr; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!is_valid_ether_addr(set_addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, set_addr->sa_data)) { + nicif_info(nic_dev, drv, netdev, + "Already using mac addr: %pM\n", set_addr->sa_data); + return 0; + } + + ret = sss_nic_update_mac(nic_dev, set_addr->sa_data); + if (ret) + return ret; + + ether_addr_copy((u8 *)(netdev->dev_addr), set_addr->sa_data); + + nicif_info(nic_dev, drv, netdev, + "Success to set new mac addr: %pM\n", set_addr->sa_data); + + return 0; +} + +static int sss_nic_ndo_vlan_rx_add_vid(struct net_device *netdev, + 
__always_unused __be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_ADD, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to add vlan %u\n", vlan_id); + return ret; + } + + SSSNIC_SET_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to add vlan %u\n", vlan_id); + + return 0; +} + +static int sss_nic_ndo_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_DEL, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to delete vlan\n"); + return ret; + } + + SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to delete vlan %u\n", vlan_id); + + return 0; +} + +static netdev_features_t sss_nic_ndo_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t netdev_feature = features; + + /* If Rx checksum is disabled, then LRO should also be disabled */ + if ((netdev_feature & NETIF_F_RXCSUM) == 0) + netdev_feature &= ~NETIF_F_LRO; + + return netdev_feature; +} + +static int sss_nic_ndo_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_feature(nic_dev, nic_dev->netdev->features, features); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sss_nic_ndo_poll_controller(struct net_device *netdev) +{ + u16 i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + for (i = 0; i < nic_dev->qp_res.qp_num; i++) + napi_schedule(&nic_dev->qp_res.irq_cfg[i].napi); +} +#endif + +static void sss_nic_ndo_set_rx_mode(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_UPDATE_MAC_FILTER); + } + + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +static int sss_nic_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_vf_info *vf_info = NULL; + + if (vf_id >= pci_num_vf(nic_dev->pdev) || + is_multicast_ether_addr(mac)) + return -EINVAL; + + vf_info = &nic_io->vf_info_group[vf_id]; + ether_addr_copy(vf_info->user_mac, mac); + + if (is_zero_ether_addr(mac)) + nic_info(nic_dev->dev_hdl, + "Success to delete mac on vf %d\n", vf_id); + else + nic_info(nic_dev->dev_hdl, + "Success to set mac %pM on vf %d\n", mac, vf_id); + + return 0; +} + +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +#else +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos) +#endif +{ + u16 pre_vlanprio; + u16 cur_vlanprio; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (qos > SSSNIC_MAX_QOS_NUM || vlan_id > SSSNIC_MAX_VLAN_ID || + vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + pre_vlanprio = 
SSSNIC_GET_VLAN_PRIO(vlan_id, qos); + cur_vlanprio = + sss_nic_vf_info_vlan_prio(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (pre_vlanprio == cur_vlanprio) + return 0; + + return sss_nic_set_hw_vf_vlan(nic_dev, cur_vlanprio, vf_id, vlan_id, qos); +} +#endif + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +static int sss_nic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, + bool set_spoofchk) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + bool cur_spoofchk; + u16 id = SSSNIC_OS_VF_ID_TO_HW(vf_id); + int ret; + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + cur_spoofchk = SSSNIC_GET_VF_SPOOFCHK(nic_dev->nic_io, vf_id); + if (set_spoofchk == cur_spoofchk) + return 0; + + ret = sss_nic_set_vf_spoofchk(nic_dev->nic_io, id, set_spoofchk); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return ret; + } + + nicif_info(nic_dev, drv, netdev, + "Success to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return 0; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +static int sss_nic_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool new_trust) +{ + bool old_trust; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev) || vf_id > nic_dev->nic_io->max_vf_num) { + nicif_err(nic_dev, drv, netdev, "Invalid vf id, VF: %d pci_num_vf: %d max_vfs: %d\n", + vf_id, pci_num_vf(nic_dev->pdev), nic_dev->nic_io->max_vf_num); + return -EINVAL; + } + + old_trust = !!nic_dev->nic_io->vf_info_group[vf_id].trust; + /* Same old and new, no need to set, return success directly */ + if (new_trust == old_trust) + return 0; + + nic_dev->nic_io->vf_info_group[vf_id].trust = !!new_trust; + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d trust %d to %d\n", + vf_id, old_trust, new_trust); + + return 0; +} +#endif + +static int sss_nic_ndo_get_vf_config(struct net_device *netdev, + int vf_id, struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + sss_nic_get_vf_attribute(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), ifla_vf); + + return 0; +} + +static int sss_nic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, netdev, + "Invalid VF Id %d, pci_num_vf %d\n", vf_id, pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + ret = sss_nic_set_vf_link_state(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), link); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set VF %d link state %d\n", vf_id, link); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d link state %d\n", + vf_id, link); + + return 0; +} + +static int sss_nic_check_vf_bw_param(const struct sss_nic_dev *nic_dev, + int vf_id, int min_rate, int max_rate) +{ + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport to set vf rate limit.\n"); + return -EOPNOTSUPP; + } + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid VF number %d\n", + pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + if (max_rate < min_rate) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid rate, maximum rate %d minimum rate %d\n", + max_rate, min_rate); + return -EINVAL; + } + + if (max_rate < 
0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid maximum rate %d\n", max_rate); + return -EINVAL; + } + + return 0; +} + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +static int sss_nic_ndo_set_vf_rate(struct net_device *netdev, + int vf_id, int min_tx_rate, int max_tx_rate) +#else +static int sss_nic_ndo_set_vf_tx_rate(struct net_device *netdev, int vf_id, + int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ +#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int min_tx_rate = 0; +#endif + u8 link_status; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, + SPEED_200000 + }; + int ret; + + ret = sss_nic_check_vf_bw_param(nic_dev, vf_id, min_tx_rate, max_tx_rate); + if (ret != 0) + return ret; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get link status when set vf tx rate.\n"); + return -EIO; + } + + if (link_status == 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set vf tx rate. the link state is down.\n"); + return -EINVAL; + } + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, + SSS_CHANNEL_NIC); + if (ret != 0 || port_info.speed >= SSSNIC_PORT_SPEED_UNKNOWN) + return -EIO; + + if (max_tx_rate > speeds[port_info.speed]) { + nicif_err(nic_dev, drv, netdev, "Invalid max_tx_rate, it must be in [0 - %u]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + ret = sss_nic_set_vf_tx_rate_limit(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), + (u32)min_tx_rate, (u32)max_tx_rate); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set VF %d max rate %d min rate %d%s\n", + vf_id, max_tx_rate, min_tx_rate, + ret == SSSNIC_TX_RATE_TABLE_FULL ? + ", tx rate profile is full" : ""); + return -EIO; + } + + nicif_info(nic_dev, drv, netdev, + "Success to set VF %d tx rate [%u-%u]\n", + vf_id, min_tx_rate, max_tx_rate); + + return 0; +} + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF +static int sss_nic_ndo_bpf(struct net_device *netdev, struct netdev_bpf *xdp) +#else +static int sss_nic_ndo_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_QUERY_PROG + if (xdp->command == XDP_QUERY_PROG) { + xdp->prog_id = nic_dev->xdp_prog ? 
nic_dev->xdp_prog->aux->id : 0; + return 0; + } +#endif + if (xdp->command == XDP_SETUP_PROG) + return sss_nic_setup_xdp(nic_dev, xdp); + + return -EINVAL; +} +#endif + +static const struct net_device_ops g_nic_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = sss_nic_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = sss_nic_ndo_set_vf_rate, +#else + .ndo_set_vf_tx_rate = sss_nic_ndo_set_vf_tx_rate, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = sss_nic_ndo_set_vf_spoofchk, +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = sss_nic_ndo_get_vf_config, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = sss_nic_ndo_set_vf_link_state, +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static const struct net_device_ops g_nicvf_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. 
RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev) +{ + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + nic_dev->netdev->netdev_ops = &g_nic_netdev_ops; + else + nic_dev->netdev->netdev_ops = &g_nicvf_netdev_ops; +} + +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev) +{ + return netdev->netdev_ops == &g_nic_netdev_ops || + netdev->netdev_ops == &g_nicvf_netdev_ops; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h new file mode 100644 index 00000000000000..941dcca091f0d0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NETDEV_OPS_H +#define SSS_NIC_NETDEV_OPS_H + +#include +#include + +#include "sss_nic_dev_define.h" + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev); +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c new file mode 100644 index 00000000000000..c4ad4fe7bcd76a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c @@ -0,0 +1,1074 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_irq.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define IPV4_VERSION 4 +#define IPV6_VERSION 6 + +#define SSSNIC_LRO_DEF_COAL_PKT_SIZE 32 +#define SSSNIC_LRO_DEF_TIME_LIMIT 16 +#define SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT 100 + +#define SSSNIC_IPV6_ADDR_SIZE 4 +#define SSSNIC_PKT_INFO_SIZE 9 +#define SSSNIC_BIT_PER_TUPLE 32 + +#define SSSNIC_RSS_VAL(val, type) \ + (((type) == 
SSSNIC_RSS_ENGINE_TOEP) ? ntohl(val) : (val)) + +/* Low 16 bits are sport, High 16 bits are dport */ +#define SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr) \ + (((u32)ntohs(*((u16 *)(l4_hdr) + 1U)) << 16) | ntohs(*(u16 *)(l4_hdr))) + +#define SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id) \ + ((u16)(nic_dev)->rss_indir_tbl[(sq_id) & 0xFF]) + +#define SSSNIC_GET_DSCP_PRI_OFFSET 2 + +#define SSSNIC_FEATURE_OP_STR(op) ((op) ? "Enable" : "Disable") + +#define SSSNIC_VLAN_TCI_TO_COS_ID(skb) \ + ((skb)->vlan_tci >> VLAN_PRIO_SHIFT) + +#define SSSNIC_IPV4_DSF_TO_COS_ID(skb) \ + (ipv4_get_dsfield(ip_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +#define SSSNIC_IPV6_DSF_TO_COS_ID(skb) \ + (ipv6_get_dsfield(ipv6_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +static int sss_nic_alloc_qp_mgmt_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 qp_num = qp_res->qp_num; + u32 len; + + len = sizeof(*qp_res->irq_cfg) * qp_num; + qp_res->irq_cfg = kzalloc(len, GFP_KERNEL); + if (!qp_res->irq_cfg) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc irq config\n"); + return -ENOMEM; + } + + len = sizeof(*qp_res->rq_res_group) * qp_num; + qp_res->rq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->rq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq res info\n"); + goto alloc_rq_res_err; + } + + len = sizeof(*qp_res->sq_res_group) * qp_num; + qp_res->sq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->sq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq res info\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + kfree(qp_res->rq_res_group); + qp_res->rq_res_group = NULL; + +alloc_rq_res_err: + kfree(qp_res->irq_cfg); + qp_res->irq_cfg = NULL; + + return -ENOMEM; +} + +static void sss_nic_free_qp_mgmt_info(struct sss_nic_qp_resource *qp_res) +{ + kfree(qp_res->irq_cfg); + kfree(qp_res->rq_res_group); + kfree(qp_res->sq_res_group); + qp_res->irq_cfg = NULL; + qp_res->sq_res_group = NULL; + qp_res->rq_res_group = NULL; +} + +static int sss_nic_alloc_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + ret = sss_nic_alloc_qp_mgmt_info(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc qp mgmt info\n"); + return ret; + } + + ret = sss_nic_alloc_rq_res_group(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq resource\n"); + goto alloc_rq_res_err; + } + + ret = sss_nic_alloc_sq_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq resource\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + sss_nic_free_rq_res_group(nic_dev, qp_res); + +alloc_rq_res_err: + sss_nic_free_qp_mgmt_info(qp_res); + + return ret; +} + +static void sss_nic_free_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + sss_nic_free_rq_res_group(nic_dev, qp_res); + sss_nic_free_sq_resource(nic_dev, qp_res); + sss_nic_free_qp_mgmt_info(qp_res); +} + +static int sss_nic_init_qp_wq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + sss_nic_init_all_sq(nic_dev, qp_res); + + ret = sss_nic_init_rq_desc_group(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to configure rq\n"); + return ret; + } + + return 0; +} + +static void sss_nic_config_dcb_qp_map(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 cos_num; + u16 
qp_num = nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + sss_nic_update_sq_cos(nic_dev, 0); + return; + } + + cos_num = sss_nic_get_user_cos_num(nic_dev); + sss_nic_update_qp_cos_map(nic_dev, cos_num); + /* For now, we don't support to change cos_num */ + if (cos_num > nic_dev->max_cos_num || cos_num > qp_num) { + nicif_err(nic_dev, drv, netdev, + "Invalid cos_num: %u, qp_num: %u or RSS is disable, disable DCB\n", + cos_num, qp_num); + nic_dev->qp_res.cos_num = 0; + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + /* if we can't enable rss or get enough qp_num, + * need to sync default configure to hw + */ + sss_nic_update_dcb_cfg(nic_dev); + } + + sss_nic_update_sq_cos(nic_dev, 1); +} + +static int sss_nic_update_dev_cfg(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int ret; + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)netdev->mtu); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set mtu\n"); + return ret; + } + + sss_nic_config_dcb_qp_map(nic_dev); + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update rx rss\n"); + return ret; + } + + return 0; +} + +static u16 sss_nic_realloc_qp_irq(struct sss_nic_dev *nic_dev, + u16 new_qp_irq_num) +{ + struct sss_irq_desc *qps_irq_info = nic_dev->irq_desc_group; + u16 act_irq_num; + u16 extra_irq_num; + u16 id; + u16 i; + + if (new_qp_irq_num > nic_dev->irq_desc_num) { + extra_irq_num = new_qp_irq_num - nic_dev->irq_desc_num; + act_irq_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + &qps_irq_info[nic_dev->irq_desc_num], + extra_irq_num); + if (act_irq_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc irq\n"); + return nic_dev->irq_desc_num; + } + + nic_dev->irq_desc_num += act_irq_num; + } else if (new_qp_irq_num < nic_dev->irq_desc_num) { + extra_irq_num = nic_dev->irq_desc_num - new_qp_irq_num; + for (i = 0; i < extra_irq_num; i++) { + id = (nic_dev->irq_desc_num - i) - 1; + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + qps_irq_info[id].irq_id); + qps_irq_info[id].irq_id = 0; + qps_irq_info[id].msix_id = 0; + } + nic_dev->irq_desc_num = new_qp_irq_num; + } + + return nic_dev->irq_desc_num; +} + +static void sss_nic_update_dcb_cos_map(struct sss_nic_dev *nic_dev, + const struct sss_nic_qp_resource *qp_res) +{ + u8 cos_num = qp_res->cos_num; + u16 max_qp = qp_res->qp_num; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + if (cos_num == 0 || cos_num > nic_dev->max_cos_num || cos_num > max_qp) + return; /* will disable DCB */ + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); +} + +static void sss_nic_update_qp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 alloc_irq_num; + u16 dst_irq_num; + u16 cur_irq_num; + struct net_device *netdev = nic_dev->netdev; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) + qp_res->qp_num = 1; + + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + + if (nic_dev->irq_desc_num >= qp_res->qp_num) + goto out; + + cur_irq_num = nic_dev->irq_desc_num; + + alloc_irq_num = sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + if (alloc_irq_num < qp_res->qp_num) { + qp_res->qp_num = alloc_irq_num; + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, qp_num: %u\n", + qp_res->qp_num); + + dst_irq_num = (u16)max_t(u16, cur_irq_num, qp_res->qp_num); + sss_nic_realloc_qp_irq(nic_dev, dst_irq_num); + } + +out: + nicif_info(nic_dev, drv, 
netdev, "Finally qp_num: %u\n", + qp_res->qp_num); +} + +static int sss_nic_init_qp_irq(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u32 irq_info_len = sizeof(*nic_dev->irq_desc_group) * nic_dev->max_qp_num; + + nic_dev->irq_desc_num = 0; + + if (irq_info_len == 0) { + nicif_err(nic_dev, drv, netdev, "Invalid irq_info_len\n"); + return -EINVAL; + } + + nic_dev->irq_desc_group = kzalloc(irq_info_len, GFP_KERNEL); + if (!nic_dev->irq_desc_group) + return -ENOMEM; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) + nic_dev->qp_res.qp_num = 1; + + if (nic_dev->irq_desc_num >= nic_dev->qp_res.qp_num) { + nicif_info(nic_dev, drv, netdev, "Finally qp_num: %u\n", + nic_dev->qp_res.qp_num); + return 0; + } + + nic_dev->irq_desc_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group, nic_dev->qp_res.qp_num); + if (nic_dev->irq_desc_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc qp irq\n"); + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; + return -ENOMEM; + } + + if (nic_dev->irq_desc_num < nic_dev->qp_res.qp_num) { + nic_dev->qp_res.qp_num = nic_dev->irq_desc_num; + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, now qp_num: %u\n", + nic_dev->qp_res.qp_num); + } + + return 0; +} + +static void sss_nic_deinit_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 id; + + for (id = 0; id < nic_dev->irq_desc_num; id++) + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group[id].irq_id); + + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; +} + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init irq info\n"); + return ret; + } + + sss_nic_update_dcb_cos_map(nic_dev, &nic_dev->qp_res); + + return 0; +} + +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_deinit_qp_irq(nic_dev); +} + +static int sss_nic_set_port_state(struct sss_nic_dev *nic_dev, bool state) +{ + int ret; + + down(&nic_dev->port_sem); + + ret = sss_nic_set_hw_port_state(nic_dev, state, SSS_CHANNEL_NIC); + + up(&nic_dev->port_sem); + + return ret; +} + +static void sss_nic_update_link_state(struct sss_nic_dev *nic_dev, + u8 link_state) +{ + struct net_device *netdev = nic_dev->netdev; + + if (nic_dev->link_status == link_state) + return; + + nic_dev->link_status = link_state; + + nicif_info(nic_dev, link, netdev, "Link is %s\n", + (link_state ? 
"up" : "down")); +} + +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + qp_info->sq_depth = qp_res->sq_depth; + qp_info->rq_depth = qp_res->rq_depth; + qp_info->qp_num = qp_res->qp_num; + + ret = sss_nic_alloc_qp(nic_dev->nic_io, nic_dev->irq_desc_group, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp\n"); + return ret; + } + + ret = sss_nic_alloc_qp_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp resource\n"); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + return ret; + } + + return 0; +} + +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + mutex_lock(&nic_dev->qp_mutex); + sss_nic_free_qp_resource(nic_dev, qp_res); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + mutex_unlock(&nic_dev->qp_mutex); +} + +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_info(nic_dev->nic_io, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp info\n"); + return ret; + } + + ret = sss_nic_init_qp_wq(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp wq\n"); + goto cfg_qp_err; + } + + ret = sss_nic_request_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to request qp irq\n"); + goto init_qp_irq_err; + } + + ret = sss_nic_update_dev_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update configure\n"); + goto cfg_err; + } + + return 0; + +cfg_err: + sss_nic_release_qp_irq(nic_dev); + +init_qp_irq_err: +cfg_qp_err: + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); + + return ret; +} + +void sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info) +{ + sss_nic_reset_rx_rss(nic_dev->netdev); + sss_nic_release_qp_irq(nic_dev); + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); +} + +int sss_nic_vport_up(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + u8 link_state = 0; + int ret; + struct net_device *netdev = nic_dev->netdev; + + func_id = sss_get_global_func_id(nic_dev->hwdev); + ret = sss_nic_set_hw_vport_state(nic_dev, func_id, true, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set vport enable\n"); + goto set_vport_state_err; + } + + ret = sss_nic_set_port_state(nic_dev, true); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set port enable\n"); + goto set_port_state_err; + } + + netif_set_real_num_rx_queues(netdev, nic_dev->qp_res.qp_num); + netif_set_real_num_tx_queues(netdev, nic_dev->qp_res.qp_num); + netif_tx_wake_all_queues(netdev); + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) { + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret == 0 && link_state != 0) + netif_carrier_on(netdev); + } else { + link_state = true; + netif_carrier_on(netdev); + } + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + sss_nic_update_link_state(nic_dev, link_state); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, 
link_state); + + return 0; + +set_port_state_err: + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + +set_vport_state_err: + sss_nic_clear_hw_qp_resource(nic_dev); + /*No packets will be send to host when after set vport disable 100ms*/ + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + + return ret; +} + +void sss_nic_vport_down(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + + netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_delayed_work_sync(&nic_dev->moderation_task); + + if (sss_get_dev_present_flag(nic_dev->hwdev) == 0) + return; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev) == 0) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, 0); + + sss_nic_set_port_state(nic_dev, false); + + func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + + sss_nic_flush_all_sq(nic_dev); + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + sss_nic_clear_hw_qp_resource(nic_dev); +} + +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_hdl, + const void *priv_data) +{ + struct net_device *netdev = nic_dev->netdev; + struct sss_nic_qp_info cur_qp_info = {0}; + struct sss_nic_qp_info new_qp_info = {0}; + int ret; + + sss_nic_update_qp_info(nic_dev, qp_res); + + ret = sss_nic_qp_resource_init(nic_dev, &new_qp_info, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to alloc channel resource\n"); + return ret; + } + + if (!SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) { + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &cur_qp_info); + sss_nic_qp_resource_deinit(nic_dev, &cur_qp_info, + &nic_dev->qp_res); + } + + if (nic_dev->irq_desc_num > qp_res->qp_num) + sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + nic_dev->qp_res = *qp_res; + + if (reopen_hdl) + reopen_hdl(nic_dev, priv_data); + + ret = sss_nic_open_dev(nic_dev, &new_qp_info, qp_res); + if (ret != 0) + goto open_channel_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto up_vport_err; + + clear_bit(SSSNIC_CHANGE_RES_INVALID, &nic_dev->flags); + nicif_info(nic_dev, drv, netdev, "Success to update channel settings\n"); + + return 0; + +up_vport_err: + sss_nic_close_dev(nic_dev, &new_qp_info); + +open_channel_err: + sss_nic_qp_resource_deinit(nic_dev, &new_qp_info, qp_res); + + return ret; +} + +static u32 sss_nic_calc_xor_rss(u8 *rss_tunple, u32 size) +{ + u32 count; + u32 hash_value; + + hash_value = rss_tunple[0]; + for (count = 1; count < size; count++) + hash_value = hash_value ^ rss_tunple[count]; + + return hash_value; +} + +static u32 sss_nic_calc_toep_rss(const u32 *rss_tunple, u32 size, const u32 *rss_key) +{ + u32 i; + u32 j; + u32 rss = 0; + u32 tunple; + + for (i = 0; i < size; i++) { + for (j = 0; j < SSSNIC_BIT_PER_TUPLE; j++) { + tunple = rss_tunple[i] & + ((u32)1 << (u32)((SSSNIC_BIT_PER_TUPLE - 1) - j)); + if (tunple != 0) + rss ^= (rss_key[i] << j) | + ((u32)((u64)rss_key[i + 1] >> (SSSNIC_BIT_PER_TUPLE - j))); + } + } + + return rss; +} + +static u8 sss_nic_parse_ipv6_info(struct sk_buff *skb, u8 hash_engine, + u32 *rss_tunple, u32 *size) +{ + struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); + u32 *daddr = (u32 *)&ipv6hdr->daddr; + u32 *saddr = (u32 *)&ipv6hdr->saddr; + u32 offset; + u8 i; + + for (i = 0; i < SSSNIC_IPV6_ADDR_SIZE; i++) { + rss_tunple[i] = SSSNIC_RSS_VAL(daddr[i], hash_engine); + /* The offset of the sport 
relative to the dport is 4 */ + offset = (u32)(i + SSSNIC_IPV6_ADDR_SIZE); + rss_tunple[offset] = SSSNIC_RSS_VAL(saddr[i], hash_engine); + } + *size = SSSNIC_IPV6_ADDR_SIZE << 1; + + return (skb_network_header(skb) + sizeof(*ipv6hdr) == + skb_transport_header(skb)) ? ipv6hdr->nexthdr : 0; +} + +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int max_sq_num) +{ + struct iphdr *iphdr = NULL; + unsigned char *l4_hdr = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(dev); + struct sss_nic_rss_type rss_type = nic_dev->rss_type; + u8 l4_proto; + u32 sq_id = 0; + u32 cnt = 0; + u8 hash_engine = nic_dev->rss_hash_engine; + u32 rss_tunple[SSSNIC_PKT_INFO_SIZE] = {0}; + bool convert_flag; + + if (skb_rx_queue_recorded(skb)) { + sq_id = skb_get_rx_queue(skb); + if (unlikely(sq_id >= max_sq_num)) + sq_id %= max_sq_num; + + return (u16)sq_id; + } + + iphdr = ip_hdr(skb); + + if (iphdr->version != IPV4_VERSION && iphdr->version != IPV6_VERSION) + return (u16)sq_id; + + if (iphdr->version == IPV4_VERSION) { + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->daddr, hash_engine); + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->saddr, hash_engine); + l4_proto = iphdr->protocol; + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv4) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv4); + } else { + l4_proto = sss_nic_parse_ipv6_info(skb, hash_engine, (u32 *)rss_tunple, &cnt); + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv6) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv6); + } + + if (convert_flag) { + l4_hdr = skb_transport_header(skb); + rss_tunple[cnt++] = SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr); + } + + if (hash_engine == SSSNIC_RSS_ENGINE_TOEP) + sq_id = sss_nic_calc_toep_rss((u32 *)rss_tunple, cnt, nic_dev->rss_key_big); + else + sq_id = sss_nic_calc_xor_rss((u8 *)rss_tunple, cnt * (u32)sizeof(cnt)); + + return SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id); +} + +static inline u8 sss_nic_get_cos_by_dscp(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + int dscp_cp; + + dscp_cp = (skb->protocol == htons(ETH_P_IP)) ? SSSNIC_IPV4_DSF_TO_COS_ID(skb) : + (skb->protocol == htons(ETH_P_IPV6) ? SSSNIC_IPV6_DSF_TO_COS_ID(skb) : + nic_dev->hw_dcb_cfg.default_cos); + return nic_dev->hw_dcb_cfg.dscp2cos[dscp_cp]; +} + +static inline u8 sss_nic_get_cos_by_pcp(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + return skb->vlan_tci ? 
+ nic_dev->hw_dcb_cfg.pcp2cos[SSSNIC_VLAN_TCI_TO_COS_ID(skb)] : + nic_dev->hw_dcb_cfg.default_cos; +} + +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + if (nic_dev->hw_dcb_cfg.trust == DCB_PCP) + return sss_nic_get_cos_by_pcp(nic_dev, skb); + + return sss_nic_get_cos_by_dscp(nic_dev, skb); +} + +#ifdef NEED_VLAN_RESTORE +static int sss_nic_restore_vlan(struct sss_nic_dev *nic_dev) +{ + int ret = 0; +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + u16 i; + struct net_device *netdev = nic_dev->netdev; + struct net_device *vlandev = NULL; + + rcu_read_lock(); + for (i = 0; i < VLAN_N_VID; i++) { +#ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU + vlandev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), i); +#else + vlandev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), i); +#endif + + if (!vlandev && SSSNIC_TEST_VLAN_BIT(nic_dev, i) != 0) { + ret = netdev->netdev_ops->ndo_vlan_rx_kill_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to delete vlan %u, ret: %d\n", i, ret); + break; + } + } else if (vlandev && SSSNIC_TEST_VLAN_BIT(nic_dev, i) == 0) { + ret = netdev->netdev_ops->ndo_vlan_rx_add_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to restore vlan %u, ret: %d\n", i, ret); + break; + } + } + } + rcu_read_unlock(); +#endif +#endif + return ret; +} +#endif + +static int sss_nic_set_lro_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, netdev_features_t *fail_feature) +{ + int ret; + bool change = !!((new_feature ^ old_feature) & NETIF_F_LRO); + bool en = !!(new_feature & NETIF_F_LRO); + + if (!change) + return 0; + +#ifdef HAVE_XDP_SUPPORT + if (en && SSSNIC_IS_XDP_ENABLE(nic_dev)) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to enable LRO when xdp is enable\n"); + return -EINVAL; + } +#endif + ret = sss_nic_set_rx_lro_state(nic_dev, en, + SSSNIC_LRO_DEF_TIME_LIMIT, SSSNIC_LRO_DEF_COAL_PKT_SIZE); + if (ret != 0) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_rx_cvlan_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; +#else + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_RX; +#endif + bool change = !!((old_feature ^ new_feature) & vlan_feature); + bool en = !!(new_feature & vlan_feature); + + if (!change) + return 0; + + ret = sss_nic_set_rx_vlan_offload(nic_dev, en); + if (ret != 0) { + *fail_feature |= vlan_feature; + sss_nic_err(nic_dev, drv, "Fail to set %s rx vlan offload\n", + SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan offload %s\n", + SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_vlan_filter_feature(struct sss_nic_dev *nic_dev, + netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret = 0; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_features_t filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t filter_feature = 
NETIF_F_HW_VLAN_FILTER; +#endif + bool change = !!((new_feature ^ old_feature) & filter_feature); + bool en = !!(new_feature & filter_feature); + + if (!change) + return 0; + +#ifdef NEED_VLAN_RESTORE + if (en) { + ret = sss_nic_restore_vlan(nic_dev); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + } +#endif + ret = sss_nic_set_vlan_fliter(nic_dev, en); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature) +{ + u32 ret = 0; + netdev_features_t fail_feature = 0; + + ret |= (u32)sss_nic_set_lro_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_rx_cvlan_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_vlan_filter_feature(nic_dev, old_feature, + new_feature, &fail_feature); + if (ret != 0) { + nic_dev->netdev->features = new_feature ^ fail_feature; + return -EIO; + } + + return 0; +} + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev) +{ + /* enable all feature in netdev->features */ + return sss_nic_set_feature(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); +} + +#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf_id, u16 vlan_id, u8 qos) +{ + int ret = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan_id == 0 && qos == 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + } else { + if (cur_vlanprio != 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, + SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (ret != 0) + return ret; + } + ret = sss_nic_create_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id), + vlan_id, qos); + } + + ret = sss_nic_update_mac_vlan(nic_dev, old_vlan, vlan_id, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + return ret; +} +#endif + +#ifdef HAVE_XDP_SUPPORT +static void sss_nic_put_prog(struct sss_nic_dev *nic_dev, struct bpf_prog *prog) +{ + int i; + struct bpf_prog *pre_prog = NULL; + + pre_prog = xchg(&nic_dev->xdp_prog, prog); + for (i = 0; i < nic_dev->max_qp_num; i++) + xchg(&nic_dev->rq_desc_group[i].xdp_prog, nic_dev->xdp_prog); + + if (pre_prog) + bpf_prog_put(pre_prog); +} + +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp) +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp) +#endif +{ + struct net_device *netdev = nic_dev->netdev; + struct netlink_ext_ack *extack = xdp->extack; + int xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + + if (netdev->mtu > xdp_max_mtu) { + NL_SET_ERR_MSG_MOD(extack, "Invalid mtu for loading xdp program"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp, netdev mtu %d is larger than xdp allowed mtu %d\n", + netdev->mtu, xdp_max_mtu); + + return -EINVAL; + } + + if ((netdev->features & NETIF_F_LRO) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Fail to setup xdp when LRO is on\n"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp when LRO is on\n"); + + return -EINVAL; + } + + sss_nic_put_prog(nic_dev, xdp->prog); + + return 0; +} + +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct 
rtnl_link_stats64 *stats) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + unsigned int start; + int qid; + + stats->tx_bytes = 0; + stats->tx_packets = 0; + stats->tx_dropped = 0; + + if (!nic_dev->sq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + do { + start = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_dropped += sq_stats->tx_dropped; + stats->tx_packets += sq_stats->tx_packets; + stats->tx_bytes += sq_stats->tx_bytes; + } while (u64_stats_fetch_retry(&sq_stats->stats_sync, start)); + } +} + +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + struct sss_nic_rq_stats *rq_stats = NULL; + unsigned int start; + int qid; + + stats->rx_errors = 0; + stats->rx_dropped = 0; + stats->rx_packets = 0; + stats->rx_bytes = 0; + + if (!nic_dev->rq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_stats = &rq_desc->stats; + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_dropped += rq_stats->rx_dropped; + stats->rx_errors += rq_stats->csum_errors + + rq_stats->other_errors; + stats->rx_packets += rq_stats->rx_packets; + stats->rx_bytes += rq_stats->rx_bytes; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + } +} +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h new file mode 100644 index 00000000000000..bb8bfce43c01c0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NETDEV_OPS_API_H +#define SSS_NIC_NETDEV_OPS_API_H + +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_io_define.h" + +typedef void (*sss_nic_reopen_handler_t)(struct sss_nic_dev *nic_dev, + const void *priv_data); + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev); +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev); +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info); +int sss_nic_vport_up(struct sss_nic_dev *nic_dev); +void sss_nic_vport_down(struct sss_nic_dev *nic_dev); +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_handler, + const void *priv_data); +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues); +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb); +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature); + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev); + +#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct 
sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf, u16 vlan, u8 qos); +#endif + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_XDP_MAX_MTU(nic_dev) ((nic_dev)->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)) +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp); +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp); +#endif +#endif +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c new file mode 100644 index 00000000000000..cd04fc74ef69f3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_ntuple.h" + +#define SSSNIC_MAX_ETHTOOL_NTUPLE_RULE BIT(9) + +#define SSSNIC_TCAM_IP_TYPE_MASK 0x1 +#define SSSNIC_TCAM_TUNNEL_TYPE_MASK 0xF +#define SSSNIC_TCAM_FUNC_ID_MASK 0x7FFF + +#define SSSNIC_TCAM_IPV4_TYPE 0 +#define SSSNIC_TCAM_IPV6_TYPE 1 + +#ifndef UNSUPPORT_NTUPLE_IPV6 +enum sss_nic_ipv6_parse_res { + SSSNIC_IPV6_MASK_INVALID, + SSSNIC_IPV6_MASK_ALL_MASK, + SSSNIC_IPV6_MASK_ALL_ZERO, +}; + +enum sss_nic_ipv6_index { + SSSNIC_IPV6_ID0, + SSSNIC_IPV6_ID1, + SSSNIC_IPV6_ID2, + SSSNIC_IPV6_ID3, +}; +#endif + +struct sss_nic_ethtool_rx_flow_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; +}; + +static void sss_nic_calculate_tcam_key_y(u8 *key_y, const u8 *src_input, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_y[id] = src_input[id] & mask[id]; +} + +static void sss_nic_calculate_tcam_key_x(u8 *key_x, const u8 *key_y, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_x[id] = key_y[id] ^ mask[id]; +} + +static void sss_nic_calculate_tcam_key(struct sss_nic_tcam_key_tag *tcam_key, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + sss_nic_calculate_tcam_key_y(fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_info_ipv4), + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); + sss_nic_calculate_tcam_key_x(fdir_tcam_rule->key.key_x, fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); +} + +static int sss_nic_parse_ipv4_base(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + u32 temp; + struct ethtool_tcpip4_spec *val = &flow_spec->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *mask = &flow_spec->m_u.tcp_ip4_spec; + + if (mask->ip4src == U32_MAX) { + temp = ntohl(val->ip4src); + tcam_key->key_info_ipv4.sipv4_l = low_16_bits(temp); + tcam_key->key_info_ipv4.sipv4_h = high_16_bits(temp); + + tcam_key->key_mask_ipv4.sipv4_l = U16_MAX; + tcam_key->key_mask_ipv4.sipv4_h = U16_MAX; + + } else if (mask->ip4src != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid source ip mask\n"); + return -EINVAL; + } + + if 
(mask->ip4dst == U32_MAX) { + temp = ntohl(val->ip4dst); + tcam_key->key_info_ipv4.dipv4_l = low_16_bits(temp); + tcam_key->key_info_ipv4.dipv4_h = high_16_bits(temp); + + tcam_key->key_mask_ipv4.dipv4_l = U16_MAX; + tcam_key->key_mask_ipv4.dipv4_h = U16_MAX; + + } else if (mask->ip4dst != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid destination ip mask\n"); + return -EINVAL; + } + + tcam_key->key_mask_ipv4.ip_type = SSSNIC_TCAM_IP_TYPE_MASK; + tcam_key->key_info_ipv4.ip_type = SSSNIC_TCAM_IPV4_TYPE; + + tcam_key->key_info_ipv4.func_id = sss_get_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv4.func_id = SSSNIC_TCAM_FUNC_ID_MASK; + + return 0; +} + +static int sss_nic_init_ipv4_l4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + struct ethtool_tcpip4_spec *l4_val = &flow_spec->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_mask = &flow_spec->m_u.tcp_ip4_spec; + int ret; + + ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_info_ipv4.dport = ntohs(l4_val->pdst); + tcam_key->key_mask_ipv4.dport = l4_mask->pdst; + + tcam_key->key_info_ipv4.sport = ntohs(l4_val->psrc); + tcam_key->key_mask_ipv4.sport = l4_mask->psrc; + + tcam_key->key_mask_ipv4.ip_proto = U8_MAX; + if (flow_spec->flow_type == TCP_V4_FLOW) + tcam_key->key_info_ipv4.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info_ipv4.ip_proto = IPPROTO_UDP; + + return 0; +} + +static int sss_nic_init_ipv4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_usrip4_spec *l3_val = &flow_spec->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *l3_mask = &flow_spec->m_u.usr_ip4_spec; + + ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv4.ip_proto = l3_mask->proto; + tcam_key->key_info_ipv4.ip_proto = l3_val->proto; + + return 0; +} + +#ifndef UNSUPPORT_NTUPLE_IPV6 +static int sss_nic_parse_ipv6_mask(const u32 *ipv6_mask) +{ + if (ipv6_mask[SSSNIC_IPV6_ID0] == 0 && ipv6_mask[SSSNIC_IPV6_ID1] == 0 && + ipv6_mask[SSSNIC_IPV6_ID2] == 0 && ipv6_mask[SSSNIC_IPV6_ID3] == 0) + return SSSNIC_IPV6_MASK_ALL_ZERO; + + if (ipv6_mask[SSSNIC_IPV6_ID0] == U32_MAX && + ipv6_mask[SSSNIC_IPV6_ID1] == U32_MAX && + ipv6_mask[SSSNIC_IPV6_ID2] == U32_MAX && ipv6_mask[SSSNIC_IPV6_ID3] == U32_MAX) + return SSSNIC_IPV6_MASK_ALL_MASK; + + return SSSNIC_IPV6_MASK_INVALID; +} + +static int sss_nic_parse_ipv6_base(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int parse_res; + u32 temp; + struct ethtool_tcpip6_spec *val = &flow_spec->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *mask = &flow_spec->m_u.tcp_ip6_spec; + + parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6src); + if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) { + tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; + + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID0]); + tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); + temp = 
ntohl(val->ip6src[SSSNIC_IPV6_ID1]); + tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID2]); + tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID3]); + tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); + + } else if (parse_res == SSSNIC_IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid src_ipv6 mask\n"); + return -EINVAL; + } + + parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6dst); + if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) { + tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; + + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID0]); + tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID1]); + tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID2]); + tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID3]); + tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); + + } else if (parse_res == SSSNIC_IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid dst_ipv6 mask\n"); + return -EINVAL; + } + + tcam_key->key_mask_ipv6.ip_type = SSSNIC_TCAM_IP_TYPE_MASK; + tcam_key->key_info_ipv6.ip_type = SSSNIC_TCAM_IPV6_TYPE; + + tcam_key->key_info_ipv6.func_id = + sss_get_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv6.func_id = SSSNIC_TCAM_FUNC_ID_MASK; + + return 0; +} + +static int sss_nic_init_ipv6_l4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_tcpip6_spec *l4_val = &flow_spec->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *l4_mask = &flow_spec->m_u.tcp_ip6_spec; + + ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv6.dport = l4_mask->pdst; + tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); + + tcam_key->key_mask_ipv6.sport = l4_mask->psrc; + tcam_key->key_info_ipv6.sport = ntohs(l4_val->psrc); + + tcam_key->key_mask_ipv6.ip_proto = U8_MAX; + if (flow_spec->flow_type == TCP_V6_FLOW) + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; + else + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; + + return 0; +} + +static int sss_nic_init_ipv6_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_usrip6_spec *l3_mask = &flow_spec->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *l3_val = &flow_spec->h_u.usr_ip6_spec; + + ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto; + tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; 
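+	/* IPV6_USER_FLOW rules match the L4 protocol exactly as supplied in the user spec */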
+ + return 0; +} +#endif + +static int sss_nic_init_fdir_tcam_info(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + int ret; + + switch (flow_spec->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = sss_nic_init_ipv4_l4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; + case IP_USER_FLOW: + ret = sss_nic_init_ipv4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = sss_nic_init_ipv6_l4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; + case IPV6_USER_FLOW: + ret = sss_nic_init_ipv6_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; +#endif + default: + return -EOPNOTSUPP; + } + + tcam_key->key_mask_ipv4.tunnel_type = SSSNIC_TCAM_TUNNEL_TYPE_MASK; + tcam_key->key_info_ipv4.tunnel_type = 0; + + fdir_tcam_rule->data.qid = (u32)flow_spec->ring_cookie; + sss_nic_calculate_tcam_key(tcam_key, fdir_tcam_rule); + + return 0; +} + +static void sss_nic_flush_tcam_list(struct sss_nic_tcam_info *tcam_info) +{ + struct sss_nic_tcam_filter *filter_tmp = NULL; + struct sss_nic_tcam_filter *filter = NULL; + struct list_head *tcam_list = &tcam_info->tcam_list; + + if (list_empty(tcam_list)) + return; + + list_for_each_entry_safe(filter, filter_tmp, + tcam_list, tcam_filter_list) { + list_del(&filter->tcam_filter_list); + kfree(filter); + } +} + +static void sss_nic_flush_tcam_node_list(struct sss_nic_tcam_info *tcam_info) +{ + struct sss_nic_tcam_node *block_tmp = NULL; + struct sss_nic_tcam_node *block = NULL; + struct list_head *dynamic_list = + &tcam_info->tcam_node_info.tcam_node_list; + + if (list_empty(dynamic_list)) + return; + + list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) { + list_del(&block->block_list); + kfree(block); + } +} + +static void sss_nic_flush_rx_flow_rule(struct sss_nic_rx_rule *rx_flow_rule) +{ + struct sss_nic_ethtool_rx_flow_rule *rule_tmp = NULL; + struct sss_nic_ethtool_rx_flow_rule *rule = NULL; + struct list_head *rule_list = &rx_flow_rule->rule_list; + + if (list_empty(rule_list)) + return; + + list_for_each_entry_safe(rule, rule_tmp, rule_list, list) { + list_del(&rule->list); + kfree(rule); + } +} + +void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev) +{ + sss_nic_flush_tcam_list(&nic_dev->tcam_info); + + sss_nic_flush_tcam_node_list(&nic_dev->tcam_info); + + sss_nic_flush_rx_flow_rule(&nic_dev->rx_rule); + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + sss_nic_flush_tcam_rule(nic_dev); + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + } +} + +static struct sss_nic_tcam_node * +sss_nic_alloc_tcam_block_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_info *nic_tcam_info, + u16 block_id) +{ + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + + dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); + if (!dynamic_block_ptr) + return NULL; + + dynamic_block_ptr->block_id = block_id; + list_add_tail(&dynamic_block_ptr->block_list, + &nic_tcam_info->tcam_node_info.tcam_node_list); + + nic_tcam_info->tcam_node_info.block_cnt++; + + return dynamic_block_ptr; +} + +static void sss_nic_free_tcam_block_resource(struct sss_nic_tcam_info *nic_tcam_info, + struct sss_nic_tcam_node *block_ptr) +{ + if (!block_ptr) + return; + + list_del(&block_ptr->block_list); + kfree(block_ptr); + + 
nic_tcam_info->tcam_node_info.block_cnt--; +} + +static struct sss_nic_tcam_node * +sss_nic_dynamic_lookup_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule, + const struct sss_nic_tcam_info *tcam_info, + struct sss_nic_tcam_filter *tcam_filter, + u16 *tcam_index) +{ + u16 index; + struct sss_nic_tcam_node *ptr = NULL; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) + if (ptr->index_cnt < SSSNIC_TCAM_BLOCK_SIZE) + break; + + if (!ptr || ptr->index_cnt >= SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup index for fdir filter dynamic\n"); + return NULL; + } + + for (index = 0; index < SSSNIC_TCAM_BLOCK_SIZE; index++) + if (ptr->index_used[index] == 0) + break; + + if (index == SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "tcam block 0x%x supports filter rules is full\n", + ptr->block_id); + return NULL; + } + + tcam_filter->block_id = ptr->block_id; + tcam_filter->index = index; + *tcam_index = index; + + fdir_tcam_rule->index = index + + SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id); + + return ptr; +} + +static int sss_nic_add_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + struct sss_nic_tcam_node *tmp = NULL; + u16 block_cnt = tcam_info->tcam_node_info.block_cnt; + u16 tcam_block_index = 0; + int block_alloc_flag = 0; + u16 index = 0; + + if (tcam_info->tcam_rule_num >= + block_cnt * SSSNIC_TCAM_BLOCK_SIZE) { + if (block_cnt >= (SSSNIC_TCAM_FILTERS_MAX / + SSSNIC_TCAM_BLOCK_SIZE)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc, dynamic tcam block is full\n"); + goto failed; + } + + ret = sss_nic_alloc_tcam_block(nic_dev, &tcam_block_index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fdir filter dynamic tcam alloc block\n"); + goto failed; + } + + block_alloc_flag = 1; + + dynamic_block_ptr = + sss_nic_alloc_tcam_block_resource(nic_dev, tcam_info, + tcam_block_index); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Fdir filter dynamic alloc block memory\n"); + goto block_alloc_failed; + } + } + + tmp = sss_nic_dynamic_lookup_tcam_filter(nic_dev, + fdir_tcam_rule, tcam_info, + tcam_filter, &index); + if (!tmp) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to dynamic lookup tcam filter\n"); + goto lookup_tcam_index_failed; + } + + ret = sss_nic_add_tcam_rule(nic_dev, fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add fdir_tcam_rule\n"); + goto add_tcam_rules_failed; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Add fdir tcam rule, func_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_num: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), + tcam_filter->block_id, index, fdir_tcam_rule->index, + fdir_tcam_rule->data.qid, tcam_info->tcam_rule_num + 1); + + if (tcam_info->tcam_rule_num == 0) { + ret = sss_nic_set_fdir_tcam_rule_filter(nic_dev, true); + if (ret != 0) + goto enable_failed; + } + + list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); + + tmp->index_used[index] = 1; + tmp->index_cnt++; + + tcam_info->tcam_rule_num++; + + return 0; + +enable_failed: + sss_nic_del_tcam_rule(nic_dev, fdir_tcam_rule->index); + +add_tcam_rules_failed: +lookup_tcam_index_failed: + 
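+	/* Roll back the TCAM block only if it was allocated in this call: drop the driver bookkeeping node first, then release the hardware block. */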
if (block_alloc_flag == 1) + sss_nic_free_tcam_block_resource(tcam_info, + dynamic_block_ptr); + +block_alloc_failed: + if (block_alloc_flag == 1) + sss_nic_free_tcam_block(nic_dev, &tcam_block_index); + +failed: + return -EFAULT; +} + +static int sss_nic_del_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + u16 block_id = tcam_filter->block_id; + struct sss_nic_tcam_node *ptr = NULL; + u32 index = 0; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) { + if (ptr->block_id == block_id) + break; + } + if (!ptr || ptr->block_id != block_id) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup block for fdir filter del dynamic\n"); + return -EFAULT; + } + + index = SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id) + + tcam_filter->index; + + ret = sss_nic_del_tcam_rule(nic_dev, index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to del fdir_tcam_rule\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Del fdir_tcam_dynamic_rule func_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), block_id, + tcam_filter->index, index, ptr->index_cnt - 1, + tcam_info->tcam_rule_num - 1); + + ptr->index_used[tcam_filter->index] = 0; + ptr->index_cnt--; + tcam_info->tcam_rule_num--; + if (ptr->index_cnt == 0) { + sss_nic_free_tcam_block(nic_dev, &block_id); + sss_nic_free_tcam_block_resource(tcam_info, ptr); + } + + if (tcam_info->tcam_rule_num == 0) + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + + list_del(&tcam_filter->tcam_filter_list); + kfree(tcam_filter); + + return 0; +} + +static inline struct sss_nic_tcam_filter * +sss_nic_lookup_tcam_filter(const struct list_head *filter_list, + struct sss_nic_tcam_key_tag *key) +{ + struct sss_nic_tcam_filter *ptr; + + list_for_each_entry(ptr, filter_list, tcam_filter_list) { + if (memcmp(key, &ptr->tcam_key, + sizeof(*key)) == 0) + return ptr; + } + + return NULL; +} + +static void sss_nic_del_ethtool_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + list_del(ð_rule->list); + nic_dev->rx_rule.rule_cnt--; + + kfree(eth_rule); +} + +static int sss_nic_del_one_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_filter *tcam_filter; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_key_tag tcam_key = {0}; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, ð_rule->flow_spec, + &tcam_key, &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (!tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exists\n"); + return -EEXIST; + } + + ret = sss_nic_del_tcam_filter(nic_dev, tcam_filter); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to delete tcam filter\n"); + return ret; + } + + sss_nic_del_ethtool_rule(nic_dev, eth_rule); + + return 0; +} + +static void sss_nic_add_rule_to_list(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *rule) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + struct list_head *head = &nic_dev->rx_rule.rule_list; + + 
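+	/* Keep the rule list ordered by ethtool location: stop at the first entry with a larger location and insert the new rule in front of it. */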
list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location > rule->flow_spec.location) + break; + head = &ptr->list; + } + nic_dev->rx_rule.rule_cnt++; + list_add(&rule->list, head); +} + +static int sss_nic_add_one_rule(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct sss_nic_tcam_key_tag tcam_key = {0}; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_filter *tcam_filter = NULL; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, flow_spec, &tcam_key, + &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); + return -EEXIST; + } + + tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); + if (!tcam_filter) + return -ENOMEM; + memcpy(&tcam_filter->tcam_key, + &tcam_key, sizeof(tcam_key)); + tcam_filter->qid = (u16)fdir_tcam_rule.data.qid; + + ret = sss_nic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); + if (ret != 0) + goto add_tcam_filter_fail; + + /* driver save new rule filter */ + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) { + ret = -ENOMEM; + goto alloc_eth_rule_fail; + } + + eth_rule->flow_spec = *flow_spec; + sss_nic_add_rule_to_list(nic_dev, eth_rule); + + return 0; + +alloc_eth_rule_fail: + sss_nic_del_tcam_filter(nic_dev, tcam_filter); +add_tcam_filter_fail: + kfree(tcam_filter); + return ret; +} + +static struct sss_nic_ethtool_rx_flow_rule * +sss_nic_ethtool_find_rule(const struct sss_nic_dev *nic_dev, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location == location) + return ptr; + } + return NULL; +} + +static int sss_nic_validate_flow(struct sss_nic_dev *nic_dev, + const struct ethtool_rx_flow_spec *flow_spec) +{ + int i; + u32 flow_type[] = { + TCP_V4_FLOW, UDP_V4_FLOW, IP_USER_FLOW, +#ifndef UNSUPPORT_NTUPLE_IPV6 + TCP_V6_FLOW, UDP_V6_FLOW, IPV6_USER_FLOW, +#endif + }; + + if (flow_spec->ring_cookie >= nic_dev->qp_res.qp_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Action larger than queue number %u\n", + nic_dev->qp_res.qp_num); + return -EINVAL; + } + + if (flow_spec->location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid location out of range: [0,%lu]\n", + SSSNIC_MAX_ETHTOOL_NTUPLE_RULE); + return -EINVAL; + } + + for (i = 0; i < ARRAY_LEN(flow_type); i++) { + if (flow_spec->flow_type == flow_type[i]) + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "flow type not supported\n"); + return -EOPNOTSUPP; +} + +int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct ethtool_rx_flow_spec flow_spec_temp; + int loc_exit_flag = 0; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + ret = sss_nic_validate_flow(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "flow is not valid %d\n", ret); + return ret; + } + + eth_rule = sss_nic_ethtool_find_rule(nic_dev, 
flow_spec->location); + /* when location is same, delete old location rule. */ + if (eth_rule) { + memcpy(&flow_spec_temp, ð_rule->flow_spec, + sizeof(flow_spec_temp)); + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + if (ret != 0) + return ret; + + loc_exit_flag = 1; + } + + /* add new rule filter */ + ret = sss_nic_add_one_rule(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add new rule filter\n"); + if (loc_exit_flag) + sss_nic_add_one_rule(nic_dev, &flow_spec_temp); + + return -ENOENT; + } + + return 0; +} + +int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location) +{ + int ret; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -ENOSPC; + + eth_rule = sss_nic_ethtool_find_rule(nic_dev, location); + if (!eth_rule) + return -ENOENT; + + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + + return ret; +} + +int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -EINVAL; + + list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list) { + if (nic_eth_rule->flow_spec.location == location) { + info->fs = nic_eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + int id = 0; + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + info->data = SSSNIC_MAX_ETHTOOL_NTUPLE_RULE; + list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list) + rule_locs[id++] = nic_eth_rule->flow_spec.location; + + return info->rule_cnt == id ? 0 : -ENOENT; +} + +bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.ring_cookie >= q_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "User defined filter %u assigns flow to queue %llu. 
Queue number %u is Invalid\n", + ptr->flow_spec.location, ptr->flow_spec.ring_cookie, q_num); + return false; + } + } + + return true; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h new file mode 100644 index 00000000000000..3712434b051032 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NTUPLE_H +#define SSS_NIC_NTUPLE_H + +#include +#include + +#include "sss_nic_dev_define.h" + +void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev); + +int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs); + +int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location); + +int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location); + +int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); + +bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c new file mode 100644 index 00000000000000..eb00a311597c73 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_ntuple.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_dcb.h" + +#define SSSNIC_INVALID_TC_ID 0xFF + +#define SSSNIC_DEF_RSS_KEY_0 0x6d +#define SSSNIC_DEF_RSS_KEY_1 0x5a +#define SSSNIC_DEF_RSS_KEY_2 0x56 +#define SSSNIC_DEF_RSS_KEY_3 0xda +#define SSSNIC_DEF_RSS_KEY_4 0x25 +#define SSSNIC_DEF_RSS_KEY_5 0x5b +#define SSSNIC_DEF_RSS_KEY_6 0x0e +#define SSSNIC_DEF_RSS_KEY_7 0xc2 +#define SSSNIC_DEF_RSS_KEY_8 0x41 +#define SSSNIC_DEF_RSS_KEY_9 0x67 +#define SSSNIC_DEF_RSS_KEY_10 0x25 +#define SSSNIC_DEF_RSS_KEY_11 0x3d +#define SSSNIC_DEF_RSS_KEY_12 0x43 +#define SSSNIC_DEF_RSS_KEY_13 0xa3 +#define SSSNIC_DEF_RSS_KEY_14 0x8f +#define SSSNIC_DEF_RSS_KEY_15 0xb0 +#define SSSNIC_DEF_RSS_KEY_16 0xd0 +#define SSSNIC_DEF_RSS_KEY_17 0xca +#define SSSNIC_DEF_RSS_KEY_18 0x2b +#define SSSNIC_DEF_RSS_KEY_19 0xcb +#define SSSNIC_DEF_RSS_KEY_20 0xae +#define SSSNIC_DEF_RSS_KEY_21 0x7b +#define SSSNIC_DEF_RSS_KEY_22 0x30 +#define SSSNIC_DEF_RSS_KEY_23 0xb4 +#define SSSNIC_DEF_RSS_KEY_24 0x77 +#define SSSNIC_DEF_RSS_KEY_25 0xcb +#define SSSNIC_DEF_RSS_KEY_26 0x2d +#define SSSNIC_DEF_RSS_KEY_27 0xa3 +#define SSSNIC_DEF_RSS_KEY_28 0x80 +#define SSSNIC_DEF_RSS_KEY_29 0x30 +#define SSSNIC_DEF_RSS_KEY_30 0xf2 +#define SSSNIC_DEF_RSS_KEY_31 0x0c +#define SSSNIC_DEF_RSS_KEY_32 0x6a +#define SSSNIC_DEF_RSS_KEY_33 0x42 +#define SSSNIC_DEF_RSS_KEY_34 0xb7 +#define SSSNIC_DEF_RSS_KEY_35 0x3b +#define SSSNIC_DEF_RSS_KEY_36 0xbe +#define SSSNIC_DEF_RSS_KEY_37 0xac +#define SSSNIC_DEF_RSS_KEY_38 0x01 +#define SSSNIC_DEF_RSS_KEY_39 0xfa + +#define SSSNIC_COS_CHANGE_OFFSET 4 + +#define SSSNIC_RXH_PORT (RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define 
SSSNIC_RXH_IP (RXH_IP_DST | RXH_IP_SRC) +#define SSSNIC_SUPPORT_RXH (SSSNIC_RXH_IP | SSSNIC_RXH_PORT) + +static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num); + +static u16 max_qp_num; +module_param(max_qp_num, ushort, 0444); +MODULE_PARM_DESC(max_qp_num, "Number of Queue Pairs (default=0)"); + +static void sss_nic_fill_indir_tbl(struct sss_nic_dev *nic_dev, u8 cos_num, u32 *indir) +{ + int i = 0; + u16 k; + u16 group_size; + u16 start_qid = 0; + u16 qp_num = 0; + u8 cur_cos = 0; + u8 j; + u8 default_cos; + u8 cos_map = sss_nic_get_valid_cos_map(nic_dev); + + if (cos_num == 0) { + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir[i] = i % nic_dev->qp_res.qp_num; + return; + } + + group_size = SSSNIC_RSS_INDIR_SIZE / cos_num; + for (j = 0; j < cos_num; j++) { + while (cur_cos < SSSNIC_DCB_COS_MAX && + nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos] == 0) + cur_cos++; + + if (cur_cos < SSSNIC_DCB_COS_MAX) { + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[cur_cos]; + } else { + if (BIT(nic_dev->hw_dcb_cfg.default_cos) & cos_map) + default_cos = nic_dev->hw_dcb_cfg.default_cos; + else + default_cos = (u8)fls(cos_map) - 1; + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[default_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[default_cos]; + } + + for (k = 0; k < group_size; k++) + indir[i++] = start_qid + k % qp_num; + + cur_cos++; + } +} + +static void sss_nic_get_dcb_cos_map(struct sss_nic_dev *nic_dev, + u8 *cos_map, u8 *cos_num) +{ + u8 i; + u8 num; + u8 cfg_map[SSSNIC_DCB_UP_MAX]; + bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + if (!dcb_en) + return; + + if (nic_dev->hw_dcb_cfg.trust == 0) { + memcpy(cfg_map, nic_dev->hw_dcb_cfg.pcp2cos, sizeof(cfg_map)); + } else if (nic_dev->hw_dcb_cfg.trust == 1) { + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) + cfg_map[i] = nic_dev->hw_dcb_cfg.dscp2cos[i * SSSNIC_DCB_DSCP_NUM]; + } + + for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++) + cos_map[SSSNIC_COS_CHANGE_OFFSET + i] = cfg_map[i]; + + for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++) + cos_map[i] = cfg_map[SSSNIC_DCB_UP_MAX - (i + 1)]; + + num = sss_nic_get_user_cos_num(nic_dev); + while (num & (num - 1)) + num++; + + *cos_num = num; +} + +int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 cos_num = 0; + u8 cos_map[SSSNIC_DCB_UP_MAX] = {0}; + struct net_device *netdev = nic_dev->netdev; + + sss_nic_get_dcb_cos_map(nic_dev, cos_map, &cos_num); + + ret = sss_nic_set_hw_rss(netdev, cos_map, cos_num); + if (ret != 0) + return ret; + + return ret; +} + +void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev) +{ + u8 cos_map[SSSNIC_DCB_UP_MAX] = {0}; + + sss_nic_config_rss_to_hw(nic_dev, 0, cos_map, 1, 0); +} + +static void sss_nic_init_rss_type(struct sss_nic_dev *nic_dev) +{ + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.udp_ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_hash_engine = SSSNIC_RSS_ENGINE_XOR; +} + +void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->rss_key); + nic_dev->rss_key = NULL; + nic_dev->rss_key_big = NULL; + + kfree(nic_dev->rss_indir_tbl); + nic_dev->rss_indir_tbl = NULL; +} + +void sss_nic_set_default_rss_indir(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); +} + 
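+/* Revert to the default RSS indirection table when DCB is enabled or when an existing entry references a queue beyond the current queue count. */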
+static void sss_nic_maybe_reset_rss_indir(struct net_device *netdev, bool dcb_en) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + if (dcb_en) { + nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n"); + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + return; + } + + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) { + if (nic_dev->rss_indir_tbl[i] >= nic_dev->qp_res.qp_num) { + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + return; + } + } +} + +static u16 sss_nic_get_online_cpu(struct pci_dev *pdev) +{ + int i; + int node; + u16 cpu_num = 0; + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(&pdev->dev)) + cpu_num++; + } + + if (cpu_num == 0) + cpu_num = (u16)num_online_cpus(); + + return cpu_num; +} + +static void sss_nic_init_qp_num(struct sss_nic_dev *nic_dev) +{ + u16 cpu_num = 0; + u16 qp_num = nic_dev->max_qp_num; + u16 default_qp_num = nic_dev->nic_svc_cap.def_queue_num; + + if (default_qp_num != 0 && default_qp_num < qp_num) + qp_num = default_qp_num; + + if (max_qp_num > nic_dev->max_qp_num) + qp_num = nic_dev->max_qp_num; + else if (max_qp_num > 0) + qp_num = max_qp_num; + + cpu_num = sss_nic_get_online_cpu(nic_dev->pdev); + + nic_dev->qp_res.qp_num = (u16)min_t(u16, qp_num, cpu_num); +} + +static void sss_nic_set_rss_hkey(struct sss_nic_dev *nic_dev, const u8 *key) +{ + u32 i; + u32 *rss_hkey = (u32 *)nic_dev->rss_key; + + memcpy(nic_dev->rss_key, key, SSSNIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + for (i = 0; i < SSSNIC_RSS_KEY_SIZE / sizeof(u32); i++) + nic_dev->rss_key_big[i] = cpu_to_be32(rss_hkey[i]); +} + +static void sss_nic_init_rss_default_key(struct sss_nic_dev *nic_dev) +{ + u8 default_key[SSSNIC_RSS_KEY_SIZE] = { + SSSNIC_DEF_RSS_KEY_0, SSSNIC_DEF_RSS_KEY_1, SSSNIC_DEF_RSS_KEY_2, + SSSNIC_DEF_RSS_KEY_3, SSSNIC_DEF_RSS_KEY_4, SSSNIC_DEF_RSS_KEY_5, + SSSNIC_DEF_RSS_KEY_6, SSSNIC_DEF_RSS_KEY_7, SSSNIC_DEF_RSS_KEY_8, + SSSNIC_DEF_RSS_KEY_9, SSSNIC_DEF_RSS_KEY_10, SSSNIC_DEF_RSS_KEY_11, + SSSNIC_DEF_RSS_KEY_12, SSSNIC_DEF_RSS_KEY_13, SSSNIC_DEF_RSS_KEY_14, + SSSNIC_DEF_RSS_KEY_15, SSSNIC_DEF_RSS_KEY_16, SSSNIC_DEF_RSS_KEY_17, + SSSNIC_DEF_RSS_KEY_18, SSSNIC_DEF_RSS_KEY_19, SSSNIC_DEF_RSS_KEY_20, + SSSNIC_DEF_RSS_KEY_21, SSSNIC_DEF_RSS_KEY_22, SSSNIC_DEF_RSS_KEY_23, + SSSNIC_DEF_RSS_KEY_24, SSSNIC_DEF_RSS_KEY_25, SSSNIC_DEF_RSS_KEY_26, + SSSNIC_DEF_RSS_KEY_27, SSSNIC_DEF_RSS_KEY_28, SSSNIC_DEF_RSS_KEY_29, + SSSNIC_DEF_RSS_KEY_30, SSSNIC_DEF_RSS_KEY_31, SSSNIC_DEF_RSS_KEY_32, + SSSNIC_DEF_RSS_KEY_33, SSSNIC_DEF_RSS_KEY_34, SSSNIC_DEF_RSS_KEY_35, + SSSNIC_DEF_RSS_KEY_36, SSSNIC_DEF_RSS_KEY_37, SSSNIC_DEF_RSS_KEY_38, + SSSNIC_DEF_RSS_KEY_39 + }; + + sss_nic_set_rss_hkey(nic_dev, default_key); +} + +static int sss_nic_alloc_rss_key(struct sss_nic_dev *nic_dev) +{ + /* We need double the space to store the RSS key, + * with the second space used to store the RSS key in big-endian mode. 
+ */ + nic_dev->rss_key = + kzalloc(SSSNIC_RSS_KEY_SIZE * SSSNIC_RSS_KEY_RSV_NUM, GFP_KERNEL); + if (!nic_dev->rss_key) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc memory for rss_hkey\n"); + return -ENOMEM; + } + + nic_dev->rss_indir_tbl = kzalloc(sizeof(u32) * SSSNIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!nic_dev->rss_indir_tbl) { + kfree(nic_dev->rss_key); + nic_dev->rss_key = NULL; + return -ENOMEM; + } + + /* The second space is for big edian hash key */ + nic_dev->rss_key_big = (u32 *)(nic_dev->rss_key + SSSNIC_RSS_KEY_SIZE); + + return 0; +} + +static int sss_nic_config_rss_hw_resource(struct sss_nic_dev *nic_dev, u32 *indir) +{ + int ret; + u8 engine_type = nic_dev->rss_hash_engine; + + ret = sss_nic_set_rss_indir_tbl(nic_dev, indir); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type); + if (ret != 0) + return ret; + + return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type); +} + +static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key); + if (ret != 0) + return ret; + + sss_nic_maybe_reset_rss_indir(netdev, dcb_en); + + if (test_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags)) + sss_nic_fill_indir_tbl(nic_dev, cos_num, nic_dev->rss_indir_tbl); + + ret = sss_nic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir_tbl); + if (ret != 0) + return ret; + + ret = sss_nic_config_rss_to_hw(nic_dev, cos_num, cos_map, + nic_dev->qp_res.qp_num, 1); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_nic_init_rss_key(struct sss_nic_dev *nic_dev) +{ + sss_nic_init_rss_default_key(nic_dev); + + sss_nic_init_qp_num(nic_dev); + + sss_nic_init_rss_type(nic_dev); + + sss_nic_fill_indir_tbl(nic_dev, 0, nic_dev->rss_indir_tbl); +} + +static int sss_nic_set_rss_key_to_hw(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 engine_type = nic_dev->rss_hash_engine; + + ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_indir_tbl(nic_dev, nic_dev->rss_indir_tbl); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type); + if (ret != 0) + return ret; + + ret = sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type); + if (ret != 0) + return ret; + + ret = sss_nic_init_hw_rss(nic_dev, nic_dev->qp_res.qp_num); + if (ret != 0) + return ret; + + return 0; +} + +void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev) +{ + int ret = 0; + + if (!SSSNIC_SUPPORT_RSS(nic_dev->nic_io) || nic_dev->max_qp_num <= 1) { + nic_dev->qp_res.qp_num = nic_dev->max_qp_num; + return; + } + + ret = sss_nic_alloc_rss_key(nic_dev); + if (ret != 0) + goto disable_rss; + + set_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags); + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + sss_nic_init_rss_key(nic_dev); + + ret = sss_nic_set_rss_key_to_hw(nic_dev); + if (ret != 0) { + sss_nic_free_rss_key(nic_dev); + nic_err(nic_dev->dev_hdl, "Fail to set hardware rss parameters\n"); + goto disable_rss; + } + + return; + +disable_rss: + clear_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->max_qp_num = 1; + nic_dev->qp_res.qp_num = nic_dev->max_qp_num; +} + +/* for ethtool */ +static int sss_nic_set_l4_rss_hash_type(const struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + u8 
rss_l4_en = 0; + + if ((cmd->data & SSSNIC_RXH_PORT) == 0) + rss_l4_en = 0; + else if ((cmd->data & SSSNIC_RXH_PORT) == SSSNIC_RXH_PORT) + rss_l4_en = 1; + else + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sss_nic_update_rss_type(struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + int ret; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = sss_nic_set_l4_rss_hash_type(cmd, rss_type); + if (ret != 0) + return ret; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupport flow type\n"); + return -EINVAL; + } + + return 0; +} + +static inline int sss_nic_check_cmd_data(struct ethtool_rxnfc *cmd) +{ + /* RSS only support hashing to queues based src and dst IP and port */ + if (cmd->data & ~SSSNIC_SUPPORT_RXH) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & SSSNIC_RXH_IP)) + return -EINVAL; + + return 0; +} + +static int sss_nic_set_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type *rss_type = &nic_dev->rss_type; + int ret; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 0) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS disable, no support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + if (sss_nic_check_cmd_data(cmd) != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid ethool rxnfc cmd data\n"); + return -EINVAL; + } + + ret = sss_nic_get_rss_type(nic_dev, rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get rss type\n"); + return -EFAULT; + } + + ret = sss_nic_update_rss_type(nic_dev, cmd, rss_type); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, *rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to set rss hash options\n"); + + return 0; +} + +static void translate_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt != 0) + cmd->data |= SSSNIC_RXH_PORT; +} + +static int sss_nic_translate_rss_type(struct sss_nic_dev *nic_dev, + struct sss_nic_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = SSSNIC_RXH_IP; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + translate_rss_type(rss_type->tcp_ipv4, cmd); + break; + case UDP_V4_FLOW: + translate_rss_type(rss_type->udp_ipv4, cmd); + break; + case TCP_V6_FLOW: + translate_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V6_FLOW: + translate_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int sss_nic_get_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type rss_type = {0}; + int ret; + + cmd->data = 0; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 0) + return 0; + + ret = sss_nic_get_rss_type(nic_dev, &rss_type); + 
if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss type\n"); + return ret; + } + + return sss_nic_translate_rss_type(nic_dev, &rss_type, cmd); +} + +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->qp_res.qp_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = (u32)nic_dev->rx_rule.rule_cnt; + break; + case ETHTOOL_GRXCLSRULE: + ret = sss_nic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + ret = sss_nic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = sss_nic_get_rss_hash_type(nic_dev, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = sss_nic_set_rss_hash_type(nic_dev, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = sss_nic_ethtool_update_flow(nic_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = sss_nic_ethtool_delete_flow(nic_dev, cmd->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static u16 sss_nic_channels_max(struct sss_nic_dev *nic_dev) +{ + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qp_num / tcs : nic_dev->max_qp_num; +} + +static u16 sss_nic_curr_channels(struct sss_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->qp_res.qp_num ? + nic_dev->qp_res.qp_num : 1; + else + return (u16)min_t(u16, sss_nic_channels_max(nic_dev), + nic_dev->qp_res.qp_num); +} + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + channels->tx_count = 0; + channels->rx_count = 0; + channels->other_count = 0; + channels->max_tx = 0; + channels->max_rx = 0; + channels->max_other = 0; + channels->max_combined = sss_nic_channels_max(nic_dev); + /* report flow director queues as maximum channels */ + channels->combined_count = sss_nic_curr_channels(nic_dev); +} + +static int sss_nic_check_channel_parameter(struct net_device *netdev, + const struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u16 max_channel = sss_nic_channels_max(nic_dev); + + if (combined_count == 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count != 0 || channels->rx_count != 0 || + channels->other_count != 0) { + nicif_err(nic_dev, drv, netdev, + "Set rx/tx/other count no support\n"); + return -EINVAL; + } + + if (combined_count > max_channel) { + nicif_err(nic_dev, drv, netdev, + "Invalid combined_count %u out of range %u\n", combined_count, + max_channel); + return -EINVAL; + } + + return 0; +} + +static void sss_nic_change_num_channel_reopen_handler(struct sss_nic_dev *nic_dev, + const void *priv_data) +{ + sss_nic_set_default_rss_indir(nic_dev->netdev); +} + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_qp_resource q_param = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u8 user_cos_num = 
sss_nic_get_user_cos_num(nic_dev); + int ret; + + if (sss_nic_check_channel_parameter(netdev, channels)) + return -EINVAL; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function not support RSS, only support 1 queue pair\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) { + if (combined_count < user_cos_num) { + nicif_err(nic_dev, drv, netdev, + "DCB is on, channel num should more than valid cos num:%u\n", + user_cos_num); + return -EOPNOTSUPP; + } + } + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io) && + !sss_nic_validate_channel_setting_in_ntuple(nic_dev, combined_count)) + return -EOPNOTSUPP; + + nicif_info(nic_dev, drv, netdev, "Set max combine queue number from %u to %u\n", + nic_dev->qp_res.qp_num, combined_count); + + if (netif_running(netdev)) { + q_param = nic_dev->qp_res; + q_param.irq_cfg = NULL; + q_param.rq_res_group = NULL; + q_param.sq_res_group = NULL; + q_param.qp_num = (u16)combined_count; + + nicif_info(nic_dev, drv, netdev, "Restart channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &q_param, + sss_nic_change_num_channel_reopen_handler, + NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to change channel setting\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + sss_nic_set_default_rss_indir(netdev); + nic_dev->qp_res.qp_num = (u16)combined_count; + } + + return 0; +} + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev) +{ + return SSSNIC_RSS_INDIR_SIZE; +} +#endif + +static int sss_nic_set_rss_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hash_key) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (indir) { + ret = sss_nic_set_rss_indir_tbl(nic_dev, indir); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set rss indir table\n"); + return -EFAULT; + } + clear_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + + memcpy(nic_dev->rss_indir_tbl, indir, + sizeof(u32) * SSSNIC_RSS_INDIR_SIZE); + nicif_info(nic_dev, drv, netdev, "Success to set rss indir\n"); + } + + if (hash_key) { + ret = sss_nic_set_rss_hash_key(nic_dev, hash_key); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set rss key\n"); + return -EFAULT; + } + + sss_nic_set_rss_hkey(nic_dev, hash_key); + nicif_info(nic_dev, drv, netdev, "Success to set rss key\n"); + } + + return 0; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev) +{ + return SSSNIC_RSS_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key, u8 *hfunc) +#else +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = nic_dev->rss_hash_engine ? 
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; +#endif + + if (indir) { + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + if (ret != 0) + return -EFAULT; + } + + if (hash_key) + memcpy(hash_key, nic_dev->rss_key, SSSNIC_RSS_KEY_SIZE); + + return ret; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key, + const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key) +#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss parameters when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "No support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + SSSNIC_RSS_ENGINE_XOR : + SSSNIC_RSS_ENGINE_TOEP; + ret = sss_nic_set_rss_hash_engine(nic_dev, + nic_dev->rss_hash_engine); + if (ret != 0) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Success to set hfunc to RSS_HASH_%s\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } +#endif + ret = sss_nic_set_rss_rxfh(netdev, indir, hash_key); + + return ret; +} + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + u32 *indir = NULL; + + /* In a low version kernel(eg:suse 11.2), call the interface twice. + * First call to get the size value, + * and second call to get the rxfh indir according to the size value. 
+ */ + if (rxfh_indir->size == 0) { + rxfh_indir->size = SSSNIC_RSS_INDIR_SIZE; + return 0; + } + + if (rxfh_indir->size < SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss indir, rss size(%d) less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "No support to get rss when rss disable\n"); + return -EOPNOTSUPP; + } + + if (indir) + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + + return ret; +} + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + const u32 *indir = NULL; + + if (rxfh_indir->size != SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss indir, rss size(%d) is less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss indir when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; + } + + return sss_nic_set_rss_rxfh(netdev, indir, NULL); +} + +#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */ diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h new file mode 100644 index 00000000000000..93b7dee9995182 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_H +#define SSS_NIC_RSS_H + +#include "sss_nic_dev_define.h" + +#define SSS_NIC_NUM_IQ_PER_FUNC 8 + +int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_set_default_rss_indir(struct net_device *netdev); + +void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev); + +/* for ethtool */ +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev); + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +#else /* HAVE_RXFH_HASHFUNC */ +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#endif /* HAVE_RXFH_HASHFUNC */ + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#else 
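+/* Prototypes for kernels whose .set_rxfh callback takes no hfunc argument */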
+#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key); +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir1); +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir); +#endif + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1); +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#endif /* (defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c new file mode 100644 index 00000000000000..a8c3a4a447d17d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key) +{ + int ret; + struct sss_nic_mbx_rss_key_cfg cmd_rss_hash_key = {0}; + u16 out_len = sizeof(cmd_rss_hash_key); + + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_rss_hash_key.key, hash_key, SSSNIC_RSS_KEY_SIZE); + + cmd_rss_hash_key.opcode = opcode; + cmd_rss_hash_key.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + &cmd_rss_hash_key, sizeof(cmd_rss_hash_key), + &cmd_rss_hash_key, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_hash_key)) { + nic_err(nic_dev->dev_hdl, + "Fail to hash key,opcode: %d ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_rss_hash_key.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + memcpy(hash_key, cmd_rss_hash_key.key, SSSNIC_RSS_KEY_SIZE); + + return 0; +} + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key) +{ + u8 rss_hash_key[SSSNIC_RSS_KEY_SIZE]; + + memcpy(rss_hash_key, hash_key, SSSNIC_RSS_KEY_SIZE); + return sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, rss_hash_key); +} + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl) +{ + int i; + int ret; + u16 *temp_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + msg_buf, msg_buf, NULL, 0, + SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get rss indir tbl\n"); + goto get_tbl_fail; + } + + temp_tbl = (u16 *)msg_buf->buf; + for (i = 0; i < 
SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl[i] = *(temp_tbl + i); + +get_tbl_fail: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +static void sss_nic_fill_indir_tbl(struct sss_nic_rss_indirect_table *indir_tbl, + const u32 *indir_table) +{ + u32 i; + u32 tbl_size; + u32 *temp_entry = NULL; + + memset(indir_tbl, 0, sizeof(*indir_tbl)); + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)indir_table[i]; + + temp_entry = (u32 *)indir_tbl->entry; + tbl_size = sizeof(indir_tbl->entry) / (sizeof(u32)); + for (i = 0; i < tbl_size; i++) + temp_entry[i] = cpu_to_be32(temp_entry[i]); +} + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl) +{ + int ret; + u64 output_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + + sss_nic_fill_indir_tbl(msg_buf->buf, indir_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + msg_buf, &output_param, + 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss indir tbl\n"); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + return ret; +} + +static int sss_nic_set_rss_type_by_ctrlq(struct sss_nic_dev *nic_dev, u32 ctx) +{ + int ret; + u64 output_param = 0; + struct sss_nic_rss_ctx_table *rss_ctx_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + rss_ctx_tbl = (struct sss_nic_rss_ctx_table *)msg_buf->buf; + memset(rss_ctx_tbl, 0, sizeof(*rss_ctx_tbl)); + rss_ctx_tbl->ctx = cpu_to_be32(ctx); + msg_buf->size = sizeof(*rss_ctx_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, msg_buf, + &output_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss ctx, ret: %d\n", ret); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return 0; +} + +static int sss_nic_set_rss_type_by_mbx(struct sss_nic_dev *nic_dev, u32 ctx) +{ + struct sss_nic_mbx_rss_ctx ctx_tbl = {0}; + u16 out_len = sizeof(ctx_tbl); + int ret; + + ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + ctx_tbl.context = ctx; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + &ctx_tbl, sizeof(ctx_tbl), &ctx_tbl, &out_len); + + if (ctx_tbl.head.state == SSS_MGMT_CMD_UNSUPPORTED) { + return SSS_MGMT_CMD_UNSUPPORTED; + } else if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &ctx_tbl)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, ctx_tbl.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type) +{ + int ret; + u32 ctx = 0; + + ctx |= SSSNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + 
SSSNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6) | + SSSNIC_RSS_TYPE_SET(1, VALID); + + ret = sss_nic_set_rss_type_by_mbx(nic_dev, ctx); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + ret = sss_nic_set_rss_type_by_ctrlq(nic_dev, ctx); + + return ret; +} + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type) +{ + int ret; + struct sss_nic_mbx_rss_ctx rss_ctx_tbl = {0}; + u16 out_len = sizeof(rss_ctx_tbl); + + if (!nic_dev || !rss_type) + return -EINVAL; + + rss_ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + &rss_ctx_tbl, sizeof(rss_ctx_tbl), + &rss_ctx_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rss_ctx_tbl)) { + nic_err(nic_dev->dev_hdl, "Fail to get hash type, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, rss_ctx_tbl.head.state, out_len); + return -EINVAL; + } + + rss_type->ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV4); + rss_type->ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6); + rss_type->ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6_EXT); + rss_type->udp_ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int sss_nic_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine) +{ + int ret; + struct sss_nic_mbx_rss_engine_cfg cmd_rss_engine = {0}; + u16 out_len = sizeof(cmd_rss_engine); + + cmd_rss_engine.opcode = cmd; + cmd_rss_engine.func_id = sss_get_global_func_id(nic_dev->hwdev); + + if (cmd == SSSNIC_MBX_OPCODE_SET) + cmd_rss_engine.hash_engine = *hash_engine; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + &cmd_rss_engine, sizeof(cmd_rss_engine), + &cmd_rss_engine, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_engine)) { + nic_err(nic_dev->dev_hdl, "Fail to handle hash engine,opcode:%d, ret: %d, status: 0x%x, out_len: 0x%x\n", + cmd, ret, cmd_rss_engine.head.state, out_len); + + return -EIO; + } + + if (cmd == SSSNIC_MBX_OPCODE_GET) + *hash_engine = cmd_rss_engine.hash_engine; + + return 0; +} + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine) +{ + return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &hash_engine); +} + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *cos_map, u16 qp_num, u8 rss_en) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev || !cos_map || (cos_num & (cos_num - 1)) != 0) + return -EINVAL; + + cmd_rss_cfg.rss_en = rss_en; + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.rq_priority_number = (cos_num > 0) ? 
(u8)ilog2(cos_num) : 0; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + memcpy(cmd_rss_cfg.prio_tc, cos_map, SSSNIC_DCB_UP_MAX); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev) + return -EINVAL; + + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h new file mode 100644 index 00000000000000..e5515c1e11cf27 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_CFG_H +#define SSS_NIC_RSS_CFG_H + +#include + +#include "sss_nic_cfg_rss_define.h" + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type); + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type); + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine); + +int sss_nic_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine); + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *prio_tc, u16 qp_num, u8 rss_en); + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num); + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key); + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key); + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl); + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c new file mode 100644 index 00000000000000..d26cc00fe02823 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c @@ -0,0 +1,904 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +/* rx cqe checksum err */ +#define SSSNIC_RX_IP_CSUM_ERR BIT(0) +#define SSSNIC_RX_TCP_CSUM_ERR BIT(1) +#define SSSNIC_RX_UDP_CSUM_ERR BIT(2) +#define SSSNIC_RX_IGMP_CSUM_ERR BIT(3) +#define 
SSSNIC_RX_ICMPV4_CSUM_ERR BIT(4) +#define SSSNIC_RX_ICMPV6_CSUM_ERR BIT(5) +#define SSSNIC_RX_SCTP_CRC_ERR BIT(6) +#define SSSNIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define SSSNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) + +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_MAX_NUM_RQ 256 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define SSSNIC_GET_RX_PKT_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) +#define SSSNIC_GET_RX_IP_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) +#define SSSNIC_GET_RX_ENC_L3_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) +#define SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) + +#define SSSNIC_GET_RX_PKT_UMBCAST(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define SSSNIC_GET_RSS_TYPES(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define SSSNIC_RQ_CQE_SGE_VLAN_SHIFT 0 +#define SSSNIC_RQ_CQE_SGE_LEN_SHIFT 16 + +#define SSSNIC_RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define SSSNIC_RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define SSSNIC_RQ_CQE_SGE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_SGE_##member##_SHIFT) & SSSNIC_RQ_CQE_SGE_##member##_MASK) + +#define SSSNIC_GET_RX_VLAN_TAG(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define SSSNIC_GET_RX_PKT_LEN(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, LEN) + +#define SSSNIC_GET_RX_CSUM_ERR(status) SSSNIC_RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define SSSNIC_GET_RX_FLUSH(status) SSSNIC_RQ_CQE_STATUS_GET(status, FLUSH) + +#define SSSNIC_GET_RX_BP_EN(status) SSSNIC_RQ_CQE_STATUS_GET(status, BP_EN) + +#define SSSNIC_GET_RX_NUM_LRO(status) SSSNIC_RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define SSSNIC_RX_IS_DECRY_PKT(status) SSSNIC_RQ_CQE_STATUS_GET(status, DECRY_PKT) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_SHIFT 0 +#define SSSNIC_RQ_CQE_PKT_NUM_SHIFT 1 +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_SHIFT 19 + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_MASK 0x1 +#define SSSNIC_RQ_CQE_PKT_NUM_MASK 0x1FU +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU + +#define SSSNIC_RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_PKT_##member##_SHIFT) & 
SSSNIC_RQ_CQE_PKT_##member##_MASK) +#define SSSNIC_GET_RQ_CQE_PKT_NUM(pkt_info) SSSNIC_RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_##member##_SHIFT) & SSSNIC_RQ_CQE_##member##_MASK) +#define SSSNIC_GET_SUPER_CQE_EN(pkt_info) \ + SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_RX_HDR_SIZE 256 +#define SSSNIC_RX_BUFFER_WRITE 16 + +#define SSSNIC_RX_TCP_PKT 0x3 +#define SSSNIC_RX_UDP_PKT 0x4 +#define SSSNIC_RX_SCTP_PKT 0x7 + +#define SSSNIC_RX_IPV4_PKT 0 +#define SSSNIC_RX_IPV6_PKT 1 +#define SSSNIC_RX_INVALID_IP_TYPE 2 + +#define SSSNIC_RX_PKT_FORMAT_NON_TUNNEL 0 +#define SSSNIC_RX_PKT_FORMAT_VXLAN 1 + +#ifdef HAVE_XDP_SUPPORT +enum sss_nic_xdp_pkt { + SSSNIC_XDP_PKT_PASS, + SSSNIC_XDP_PKT_DROP, +}; +#endif + +#define SSSNIC_LRO_PKT_HDR_LEN_IPV4 66 +#define SSSNIC_LRO_PKT_HDR_LEN_IPV6 86 +#define SSSNIC_LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? SSSNIC_LRO_PKT_HDR_LEN_IPV6 : SSSNIC_LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_GET_SGE_NUM(pkt_len, rxq) \ + ((u8)(((pkt_len) >> (rxq)->buff_size_shift) + \ + (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0))) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + struct page *page = rx_desc->page; + dma_addr_t dma_addr = rx_desc->buf_daddr; + + if (likely(dma_addr != 0)) + return true; + + page = alloc_pages_node(NUMA_NO_NODE, + GFP_ATOMIC | __GFP_COLD | __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) + return false; + + dma_addr = dma_map_page(nic_dev->dev_hdl, page, 0, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(nic_dev->dev_hdl, dma_addr) != 0)) { + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_desc->page = page; + rx_desc->buf_daddr = dma_addr; + rx_desc->page_offset = 0; + + return true; +} + +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 idle_wqe = rq_desc->delta - 1; + dma_addr_t dma_addr; + u32 i; + + for (i = 0; i < idle_wqe; i++) { + rx_desc = &rq_desc->rx_desc_group[rq_desc->pi]; + rqe = rx_desc->rqe; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + break; + } + + dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(dma_addr)); + } + rq_desc->pi = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + } + + if (likely(i != 0)) { + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + rq_desc->delta -= i; + rq_desc->backup_pi = rq_desc->pi; + } else if (idle_wqe == rq_desc->q_depth - 1) { + SSSNIC_RQ_STATS_INC(rq_desc, rx_buf_errors); + } + + return i; +} + +#define SSS_NIC_FILL_BD_SGE(rq_desc) \ +do { \ + struct sss_nic_dev *nic_dev = netdev_priv((rq_desc)->netdev); \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct 
sss_nic_rqe *_rqe = NULL; \ + u32 _idle_wqe = (rq_desc)->delta - 1; \ + dma_addr_t _dma_addr; \ + u32 _id; \ +\ + for (_id = 0; _id < _idle_wqe; _id++) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->pi]; \ + _rqe = _rx_desc->rqe; \ +\ + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, _rx_desc))) { \ + SSSNIC_RQ_STATS_INC((rq_desc), alloc_rx_dma_err); \ + break; \ + } \ +\ + _dma_addr = _rx_desc->buf_daddr + _rx_desc->page_offset; \ +\ + if ((rq_desc)->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { \ + _rqe->extend_rqe.bd_sect.sge.low_addr = \ + sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->extend_rqe.bd_sect.sge.high_addr = \ + sss_hw_be32(upper_32_bits(_dma_addr)); \ + } else { \ + _rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(_dma_addr)); \ + } \ + (rq_desc)->pi = (u16)(((rq_desc)->pi + 1) & (rq_desc)->qid_mask); \ + } \ +\ + if (likely(_id != 0)) { \ + sss_nic_write_db((rq_desc)->rq, (rq_desc)->qid & (SSSNIC_DCB_COS_MAX - 1), \ + RQ_CFLAG_DP, \ + (u16)((u32)(rq_desc)->pi << (rq_desc)->rq->wqe_type)); \ +\ + (rq_desc)->delta -= _id; \ + (rq_desc)->backup_pi = (rq_desc)->pi; \ + } else if (_idle_wqe == (rq_desc)->q_depth - 1) { \ + SSSNIC_RQ_STATS_INC((rq_desc), rx_buf_errors); \ + } \ +} while (0) + +#define sss_nic_rx_reuse_dma_page(rq_desc, old_rqe_desc) \ +do { \ + u16 _pi = (rq_desc)->backup_pi; \ + struct sss_nic_rx_desc *new_rqe_desc; \ +\ + new_rqe_desc = &(rq_desc)->rx_desc_group[_pi++]; \ +\ + (rq_desc)->backup_pi = (_pi < (rq_desc)->q_depth) ? _pi : 0; \ +\ + new_rqe_desc->page = (old_rqe_desc)->page; \ + new_rqe_desc->page_offset = (old_rqe_desc)->page_offset; \ + new_rqe_desc->buf_daddr = (old_rqe_desc)->buf_daddr; \ +\ + dma_sync_single_range_for_device((rq_desc)->dev, new_rqe_desc->buf_daddr, \ + new_rqe_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +} while (0) + +#if L1_CACHE_BYTES < 128 +#define PREFETCH_L1_CACHE(vaddr) prefetch((vaddr) + L1_CACHE_BYTES) +#else +#define PREFETCH_L1_CACHE(vaddr) do {} while (0) +#endif + +#define sss_nic_skb_add_rx_frag(rq_desc, rx_desc, skb, size, ret_flag) \ +do { \ + u8 *vaddr; \ + struct page *page; \ +\ + page = (rx_desc)->page; \ + vaddr = (u8 *)page_address(page) + (rx_desc)->page_offset; \ + prefetch(vaddr); \ + PREFETCH_L1_CACHE(vaddr); \ +\ + dma_sync_single_range_for_cpu((rq_desc)->dev, (rx_desc)->buf_daddr, \ + (rx_desc)->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +\ + if ((size) <= SSSNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { \ + memcpy(__skb_put((skb), (size)), vaddr, ALIGN((size), sizeof(long))); \ + if (likely(page_to_nid(page) == numa_node_id())) \ + *(ret_flag) = true; \ + else { \ + put_page(page); \ + *(ret_flag) = false; \ + } \ + } else { \ + skb_add_rx_frag((skb), skb_shinfo(skb)->nr_frags, page, \ + (int)(rx_desc)->page_offset, (int)(size), (rq_desc)->buf_len); \ + if (unlikely(page_count(page) != 1)) \ + *(ret_flag) = false; \ + else if (unlikely(page_to_nid(page) != numa_node_id())) \ + *(ret_flag) = false; \ + else { \ + (rx_desc)->page_offset ^= (rq_desc)->buf_len; \ + get_page(page); \ + *(ret_flag) = true; \ + } \ + } \ +} while (0) + +#define sss_nic_combine_skb(rq_desc, head_skb, sge_num, pkt_size) \ +do { \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 _frag_num = 0; \ + u32 tmp_pkt_sz = (pkt_size); \ + u8 tmp_sge_num = (sge_num); \ + u32 _size; \ + u32 _ci; \ + u8 _ret; \ +\ + _skb = (head_skb); \ + _ci = (rq_desc)->ci & (rq_desc)->qid_mask; \ + 
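+	/* one pass per SGE: attach the page fragment to the skb (chaining extra skbs via frag_list), then recycle or unmap the page */ \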
while (tmp_sge_num > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[_ci]; \ + if (unlikely(tmp_pkt_sz > (rq_desc)->buf_len)) { \ + _size = (rq_desc)->buf_len; \ + tmp_pkt_sz -= (rq_desc)->buf_len; \ + } else { \ + _size = tmp_pkt_sz; \ + } \ +\ + if (unlikely(_frag_num == MAX_SKB_FRAGS)) { \ + if (_skb == (head_skb)) \ + _skb = skb_shinfo(_skb)->frag_list; \ + else \ + _skb = _skb->next; \ +\ + _frag_num = 0; \ + } \ +\ + if (unlikely(_skb != (head_skb))) { \ + (head_skb)->truesize += (rq_desc)->buf_len; \ + (head_skb)->len += _size; \ + (head_skb)->data_len += _size; \ + } \ +\ + sss_nic_skb_add_rx_frag((rq_desc), _rx_desc, _skb, _size, &_ret); \ + if (likely(_ret)) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ + else \ + dma_unmap_page((rq_desc)->dev, _rx_desc->buf_daddr, \ + (rq_desc)->dma_buff_size, DMA_FROM_DEVICE); \ +\ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ + tmp_sge_num--; \ + _frag_num++; \ + _ci = (_ci + 1) & (rq_desc)->qid_mask; \ + } \ +} while (0) + +#define sss_nic_fetch_one_skb(rq_desc, pkt_size, ret_skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + struct sk_buff *head_skb = NULL; \ + struct sk_buff *next_skb = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 sge_num; \ + u8 skb_num; \ +\ + head_skb = netdev_alloc_skb_ip_align((rq_desc)->netdev, SSSNIC_RX_HDR_SIZE); \ + if (likely(head_skb)) { \ + sge_num = SSSNIC_GET_SGE_NUM((pkt_size), (rq_desc)); \ + if (likely(sge_num <= MAX_SKB_FRAGS)) \ + skb_num = 1; \ + else \ + skb_num = (sge_num / MAX_SKB_FRAGS) + \ + ((sge_num % MAX_SKB_FRAGS) ? 1 : 0); \ +\ + while (unlikely(skb_num > 1)) { \ + next_skb = netdev_alloc_skb_ip_align(_netdev, SSSNIC_RX_HDR_SIZE); \ + if (unlikely(!next_skb)) { \ + dev_kfree_skb_any(head_skb); \ + break; \ + } \ +\ + if (!_skb) { \ + skb_shinfo(head_skb)->frag_list = next_skb; \ + _skb = next_skb; \ + } else { \ + _skb->next = next_skb; \ + _skb = next_skb; \ + } \ +\ + skb_num--; \ + } \ +\ + if (likely(skb_num <= 1)) { \ + prefetchw(head_skb->data); \ + sss_nic_combine_skb((rq_desc), head_skb, sge_num, (pkt_size)); \ +\ + (rq_desc)->delta += sge_num; \ + (rq_desc)->ci += sge_num; \ +\ + (ret_skb) = head_skb; \ + } else { \ + (ret_skb) = NULL; \ + } \ + } else { \ + (ret_skb) = NULL; \ + } \ +} while (0) + +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats) +{ + struct sss_nic_rq_stats *rq_stats = &rq_desc->stats; + unsigned int start; + + u64_stats_update_begin(&stats->stats_sync); + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_bytes = rq_stats->rx_bytes; + stats->rx_packets = rq_stats->rx_packets; + stats->csum_errors = rq_stats->csum_errors; + stats->other_errors = rq_stats->other_errors; + stats->errors = rq_stats->csum_errors + rq_stats->other_errors; + stats->rx_dropped = rq_stats->rx_dropped; + stats->xdp_dropped = rq_stats->xdp_dropped; + stats->rx_buf_errors = rq_stats->rx_buf_errors; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + u64_stats_update_end(&stats->stats_sync); +} + +static unsigned int sss_nic_eth_get_headlen(struct sk_buff *skb, + unsigned char *data, + unsigned int max_hlen) +{ +#ifdef HAVE_ETH_GET_HEADLEN_FUNC +#ifdef ETH_GET_HEADLEN_NEED_DEV + return eth_get_headlen(skb->dev, data, SSSNIC_RX_HDR_SIZE); +#else + return eth_get_headlen(data, SSSNIC_RX_HDR_SIZE); +#endif +#else +#define IP_FRAG_OFFSET 0x1FFF +#define FCOE_HLEN 38 +#define TCP_HEAD_OFFSET 12 + u8 nexthdr = 0; + u16 proto; + u8 hlen; + union { + struct ethhdr *eth; + struct 
vlan_ethhdr *vlan; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + unsigned char *data; + } header; + + if (unlikely(max_hlen < ETH_HLEN)) + return max_hlen; + + header.data = data; + proto = header.eth->h_proto; + + if (proto == htons(ETH_P_8021AD) || proto == htons(ETH_P_8021Q)) { + if (unlikely(max_hlen < ETH_HLEN + VLAN_HLEN)) + return max_hlen; + + proto = header.vlan->h_vlan_encapsulated_proto; + header.data += sizeof(struct vlan_ethhdr); + } else { + header.data += ETH_HLEN; + } + + if (proto == htons(ETH_P_IP)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct iphdr))) + return max_hlen; + + hlen = (header.data[0] & 0x0F) << 2; + if (hlen < sizeof(struct iphdr)) + return (unsigned int)(header.data - data); + + if ((header.ipv4->frag_off & htons(IP_FRAG_OFFSET)) == 0) + nexthdr = header.ipv4->proto; + + header.data += hlen; + } else if (proto == htons(ETH_P_IPV6)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct ipv6hdr))) + return max_hlen; + + nexthdr = header.ipv6->nexthdr; + header.data += sizeof(struct ipv6hdr); + } else if (proto == htons(ETH_P_FCOE)) { + header.data += FCOE_HLEN; + } else { + return (unsigned int)(header.data - data); + } + + if (nexthdr == IPPROTO_TCP) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct tcphdr))) + return max_hlen; + + if (SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & 0xF0) > + sizeof(struct tcphdr)) + header.data += SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & + 0xF0); + else + header.data += sizeof(struct tcphdr); + } else if (nexthdr == IPPROTO_UDP || nexthdr == IPPROTO_UDPLITE) { + header.data += sizeof(struct udphdr); + } else if (nexthdr == IPPROTO_SCTP) { + header.data += sizeof(struct sctphdr); + } + + if ((header.data - data) > max_hlen) + return max_hlen; + else + return (unsigned int)(header.data - data); +#endif +} + +#define sss_nic_pull_tail(skb) \ +do { \ + skb_frag_t *_frag = &skb_shinfo(skb)->frags[0]; \ + unsigned int _len; \ + unsigned char *_data = NULL; \ +\ + _data = skb_frag_address(_frag); \ +\ + _len = sss_nic_eth_get_headlen((skb), _data, SSSNIC_RX_HDR_SIZE); \ +\ + skb_copy_to_linear_data((skb), _data, ALIGN(_len, sizeof(long))); \ +\ + skb_frag_size_sub(_frag, (int)_len); \ + skb_frag_off_add(_frag, (int)_len); \ +\ + (skb)->tail += _len; \ + (skb)->data_len -= _len; \ +} while (0) + +#define sss_nic_check_rx_csum(rq_desc, offload_type, status, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + u32 pkt_fmt = SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); \ + u32 pkt_type = SSSNIC_GET_RX_PKT_TYPE(offload_type); \ + u32 ip_type = SSSNIC_GET_RX_IP_TYPE(offload_type); \ + u32 chksum_err; \ +\ + chksum_err = SSSNIC_GET_RX_CSUM_ERR(status); \ + if (unlikely(chksum_err == SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) \ + (rq_desc)->stats.other_errors++; \ +\ + if ((_netdev->features & NETIF_F_RXCSUM)) { \ + if (unlikely(chksum_err != 0)) { \ + if ((chksum_err & \ + (SSSNIC_RX_CSUM_HW_CHECK_NONE | \ + SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) == 0) \ + (rq_desc)->stats.csum_errors++; \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (ip_type == SSSNIC_RX_INVALID_IP_TYPE || \ + !(pkt_fmt == SSSNIC_RX_PKT_FORMAT_NON_TUNNEL || \ + pkt_fmt == SSSNIC_RX_PKT_FORMAT_VXLAN)) { \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (pkt_type == SSSNIC_RX_TCP_PKT || \ + pkt_type == SSSNIC_RX_UDP_PKT || \ + pkt_type == SSSNIC_RX_SCTP_PKT) \ + (skb)->ip_summed = CHECKSUM_UNNECESSARY; \ + else \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } \ +} while (0) + +#ifdef 
HAVE_SKBUFF_CSUM_LEVEL +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ +\ + if (_netdev->features & NETIF_F_GRO) { \ + if (SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == \ + SSSNIC_RX_PKT_FORMAT_VXLAN && \ + (skb)->ip_summed == CHECKSUM_UNNECESSARY) \ + (skb)->csum_level = 1; \ + } \ +} while (0) +#else +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) do {} while (0) +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ + +static void sss_nic_loop_copy_data(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + int loop_pkt_len = nic_dev->loop_pkt_len; + void *frag_data = NULL; + int frag_size; + int pkt_off; + int i; + + if (nic_dev->loop_test_rx_cnt == SSSNIC_LP_PKT_CNT) { + nic_dev->loop_test_rx_cnt = 0; + nicif_warn(nic_dev, rx_err, netdev, "Loopback test received too many pkts\n"); + } + + if (skb->len != loop_pkt_len) { + nicif_warn(nic_dev, rx_err, netdev, "Invalid packet length\n"); + nic_dev->loop_test_rx_cnt++; + return; + } + + pkt_off = nic_dev->loop_test_rx_cnt * loop_pkt_len; + frag_size = (int)skb_headlen(skb); + memcpy(loop_test_rx_buf + pkt_off, skb->data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_size = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy(loop_test_rx_buf + pkt_off, frag_data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + } + nic_dev->loop_test_rx_cnt++; +} + +#define sss_nic_update_gso_params(skb, gso) \ +do { \ + struct ethhdr *_ether = (struct ethhdr *)((skb)->data); \ + __be16 _protocol; \ +\ + _protocol = __vlan_get_protocol((skb), _ether->h_proto, NULL); \ +\ + skb_shinfo(skb)->gso_segs = gso; \ + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP(((skb)->len - skb_headlen(skb)), \ + gso); \ + skb_shinfo(skb)->gso_type = (_protocol == htons(ETH_P_IP)) ? 
\ + SKB_GSO_TCPV4 : SKB_GSO_TCPV6; \ +} while (0) + +#ifdef HAVE_XDP_SUPPORT +#define sss_nic_xdp_update_rx_info(rq_desc, wqe_num) \ +do { \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + u16 _wqe_cnt = wqe_num; \ +\ + while (_wqe_cnt > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + if (likely(page_to_nid(_rx_desc->page) == numa_node_id())) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ +\ + (rq_desc)->ci++; \ + (rq_desc)->delta++; \ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ +\ + _wqe_cnt--; \ + } \ +} while (0) + +#ifdef HAVE_XDP_FRAME_SZ +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) ((xdp)->frame_sz = (len)) +#else +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) do {} while (0) +#endif + +#ifdef HAVE_XDP_DATA_META +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) xdp_set_data_meta_invalid(xdp) +#else +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) do {} while (0) +#endif + +#ifdef HAVE_BFP_WARN_NETDEV_PARAM +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(netdev, xdp_prog, ret) +#else +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(ret) +#endif + +#define sss_nic_bpf_prog_run_xdp(rq_desc, pkt_size, result) \ +do { \ + struct bpf_prog *xdp_prog = NULL; \ + struct sss_nic_rx_desc *rx_desc = NULL; \ + struct xdp_buff xdp; \ + u16 _wqe_num = 1; \ + u8 *_data = NULL; \ + u32 _ret; \ +\ + rcu_read_lock(); \ +\ + xdp_prog = READ_ONCE((rq_desc)->xdp_prog); \ + if (!xdp_prog) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else if (unlikely((pkt_size) > (rq_desc)->buf_len)) { \ + SSSNIC_RQ_STATS_INC((rq_desc), large_xdp_pkts); \ + _wqe_num = (u16)((pkt_size) >> (rq_desc)->buff_size_shift) + \ + (((pkt_size) & ((rq_desc)->buf_len - 1)) ? 
1 : 0); \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + } else { \ + rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + _data = (u8 *)page_address(rx_desc->page) + rx_desc->page_offset; \ + prefetch(_data); \ + dma_sync_single_range_for_cpu((rq_desc)->dev, rx_desc->buf_daddr, \ + rx_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ + xdp.data = _data; \ + xdp.data_hard_start = xdp.data; \ + xdp.data_end = xdp.data + (pkt_size); \ + SSSNIC_SET_XDP_FRAME_SZ(&xdp, (rq_desc)->buf_len); \ + SSSNIC_XDP_SET_DATA_META_INVALID(&xdp); \ + prefetchw(xdp.data_hard_start); \ +\ + _ret = bpf_prog_run_xdp(xdp_prog, &xdp); \ + if (_ret == XDP_PASS) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else { \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + if (_ret != XDP_DROP) { \ + SSSNIC_BDF_WARN_INVALID_XDP_ACTION((rq_desc)->netdev, \ + xdp_prog, _ret); \ + } \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + } \ + } \ +\ + rcu_read_unlock(); \ +} while (0) +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_CTAG_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#else +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#endif + +static int sss_nic_recv_one_packet(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_cqe *rx_cqe, u32 pkt_len, + u32 vlan_len, u32 status) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sk_buff *skb = NULL; + u32 offload_type; + u16 lro_segs; + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_result; + + sss_nic_bpf_prog_run_xdp(rq_desc, pkt_len, &xdp_result); + if (xdp_result == SSSNIC_XDP_PKT_DROP) + return 0; +#endif + + sss_nic_fetch_one_skb(rq_desc, pkt_len, skb); + if (unlikely(!skb)) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_skb_err); + return -ENOMEM; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + sss_nic_pull_tail(skb); + + offload_type = sss_hw_cpu32(rx_cqe->offload_type); + sss_nic_check_rx_csum(rq_desc, offload_type, status, skb); + sss_nic_check_rx_gso(rq_desc, offload_type, skb); + sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len); + + if (unlikely(test_bit(SSSNIC_LP_TEST, &nic_dev->flags))) + sss_nic_loop_copy_data(nic_dev, skb); + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) + sss_nic_update_gso_params(skb, lro_segs); + + skb_record_rx_queue(skb, rq_desc->qid); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { +#ifdef HAVE_NAPI_GRO_FLUSH_OLD + napi_gro_flush(&rq_desc->irq_cfg->napi, false); +#else + napi_gro_flush(&rq_desc->irq_cfg->napi); +#endif + netif_receive_skb(skb); + } else { + napi_gro_receive(&rq_desc->irq_cfg->napi, skb); + } + + return 0; +} + +int 
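+/* NAPI poll: drains up to "budget" completed RX CQEs, passes packets up via GRO, accounts LRO-coalesced bytes, and refills the RQ once at least SSSNIC_RX_BUFFER_WRITE descriptors are free */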
sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget) +{ + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sss_nic_cqe *rx_cqe = NULL; + u64 rx_bytes = 0; + int pkts = 0; + int rx_packets = 0; + u16 wqe_num = 0; + u16 lro_segs; + u32 ci; + u32 status; + u32 pkt_len; + u32 vlan_len; + + while (likely(pkts < budget)) { + ci = rq_desc->ci & rq_desc->qid_mask; + rx_cqe = rq_desc->rx_desc_group[ci].cqe; + status = sss_hw_cpu32(rx_cqe->state); + if (!SSSNIC_GET_RX_DONE(status)) + break; + + /* read rx cqe firstly */ + rmb(); + + vlan_len = sss_hw_cpu32(rx_cqe->vlan_len); + pkt_len = SSSNIC_GET_RX_PKT_LEN(vlan_len); + if (sss_nic_recv_one_packet(rq_desc, rx_cqe, pkt_len, vlan_len, status)) + break; + + rx_bytes += pkt_len; + pkts++; + rx_packets++; + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) { + rx_bytes += ((lro_segs - 1) * SSSNIC_LRO_PKT_HDR_LEN(rx_cqe)); + wqe_num += SSSNIC_GET_SGE_NUM(pkt_len, rq_desc); + } + + rx_cqe->state = 0; + + if (wqe_num >= nic_dev->rx_poll_wqe) + break; + } + + if (rq_desc->delta >= SSSNIC_RX_BUFFER_WRITE) + SSS_NIC_FILL_BD_SGE(rq_desc); + + u64_stats_update_begin(&rq_desc->stats.stats_sync); + rq_desc->stats.rx_packets += (u64)(u32)rx_packets; + rq_desc->stats.rx_bytes += rx_bytes; + u64_stats_update_end(&rq_desc->stats.stats_sync); + + return pkts; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h new file mode 100644 index 00000000000000..15df34e5b17458 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_H +#define SSS_NIC_RX_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_HEADER_LEN_TO_BYTE(header) ((header) >> 2) + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define SSSNIC_RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define SSSNIC_RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define SSSNIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 +#define SSSNIC_RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define SSSNIC_RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_FLUSH_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U + +#define SSSNIC_RQ_CQE_STATUS_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_STATUS_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_STATUS_##member##_MASK) + +#define SSSNIC_GET_RQ_CQE_STATUS(rq_desc, id) \ + sss_hw_cpu32((rq_desc)->rx_desc_group[id].cqe->state) + +#define SSSNIC_GET_RX_DONE(status) SSSNIC_RQ_CQE_STATUS_GET(status, RXDONE) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc); +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc); +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats); +int sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c 
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c new file mode 100644 index 00000000000000..6877443b765018 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_cfg.h" + +static void sss_nic_rx_free_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr != 0) { + dma_unmap_page(nic_dev->dev_hdl, rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static u32 sss_nic_rx_alloc_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 i; + + for (i = 0; i < rq_depth - 1; i++) + if (!sss_nic_rx_alloc_dma_page(nic_dev, &rx_desc_group[i])) + break; + + return i; +} + +static void sss_nic_rx_free_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 id; + + for (id = 0; id < rq_depth; id++) + sss_nic_rx_free_dma_page(nic_dev, &rx_desc_group[id]); +} + +static void _sss_nic_free_rq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_resource *rq_res, u32 rq_depth) +{ + u64 size = sizeof(struct sss_nic_cqe) * rq_depth; + + sss_nic_rx_free_dma_buffer(nic_dev, rq_depth, rq_res->rx_desc_group); + dma_free_coherent(nic_dev->dev_hdl, size, rq_res->cqe_vaddr, rq_res->cqe_paddr); + kfree(rq_res->rx_desc_group); + rq_res->cqe_vaddr = NULL; + rq_res->rx_desc_group = NULL; +} + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int i; + int id; + u32 page_num; + u64 size; + u64 cqe_dma_size = sizeof(struct sss_nic_cqe) * qp_res->rq_depth; + struct sss_nic_rq_resource *rq_res = NULL; + + for (id = 0; id < qp_res->qp_num; id++) { + rq_res = &qp_res->rq_res_group[id]; + rq_res->cqe_vaddr = dma_zalloc_coherent(nic_dev->dev_hdl, cqe_dma_size, + &rq_res->cqe_paddr, GFP_KERNEL); + if (!rq_res->cqe_vaddr) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc cqe dma buf, rq%d\n", id); + goto alloc_cqe_dma_err; + } + + size = sizeof(*rq_res->rx_desc_group) * qp_res->rq_depth; + rq_res->rx_desc_group = kzalloc(size, GFP_KERNEL); + if (!rq_res->rx_desc_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx info, rq%d\n", id); + goto alloc_rqe_desc_group_err; + } + + page_num = sss_nic_rx_alloc_dma_buffer(nic_dev, qp_res->rq_depth, + rq_res->rx_desc_group); + if (page_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx buffer, rq%d\n", id); + goto alloc_rx_buf_err; + } + rq_res->page_num = (u16)page_num; + } + return 0; + +alloc_rx_buf_err: + kfree(rq_res->rx_desc_group); + rq_res->rx_desc_group = NULL; + +alloc_rqe_desc_group_err: + dma_free_coherent(nic_dev->dev_hdl, cqe_dma_size, rq_res->cqe_vaddr, + rq_res->cqe_paddr); + rq_res->cqe_vaddr = NULL; + +alloc_cqe_dma_err: + for (i = 0; i < id; i++) + _sss_nic_free_rq_resource(nic_dev, 
&qp_res->rq_res_group[i], + qp_res->rq_depth); + + return -ENOMEM; +} + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int id; + + for (id = 0; id < qp_res->qp_num; id++) + _sss_nic_free_rq_resource(nic_dev, &qp_res->rq_res_group[id], + qp_res->rq_depth); +} + +static void sss_nic_init_rq_desc(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_qp_resource *qp_res, + struct sss_nic_rq_resource *rq_res, + struct sss_irq_desc *irq_desc) +{ + u32 id; + dma_addr_t dma_addr; + struct sss_nic_cqe *rq_cqe; + + rq_desc->irq_id = irq_desc->irq_id; + rq_desc->msix_id = irq_desc->msix_id; + rq_desc->pi = 0; + rq_desc->backup_pi = rq_res->page_num; + rq_desc->q_depth = qp_res->rq_depth; + rq_desc->delta = rq_desc->q_depth; + rq_desc->qid_mask = rq_desc->q_depth - 1; + rq_desc->ci = 0; + rq_desc->last_sw_pi = rq_desc->q_depth - 1; + rq_desc->last_sw_ci = 0; + rq_desc->last_hw_ci = 0; + rq_desc->check_err_cnt = 0; + rq_desc->print_err_cnt = 0; + rq_desc->rx_pkts = 0; + rq_desc->reset_wqe_num = 0; + rq_desc->rx_desc_group = rq_res->rx_desc_group; + + dma_addr = rq_res->cqe_paddr; + rq_cqe = (struct sss_nic_cqe *)rq_res->cqe_vaddr; + for (id = 0; id < qp_res->rq_depth; id++) { + rq_desc->rx_desc_group[id].cqe = rq_cqe; + rq_desc->rx_desc_group[id].cqe_daddr = dma_addr; + dma_addr += sizeof(*rq_desc->rx_desc_group[id].cqe); + rq_cqe++; + } +} + +static void sss_nic_fill_cqe_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 i; + + for (i = 0; i < rq_desc->q_depth; i++) { + rx_desc = &rq_desc->rx_desc_group[i]; + rqe = sss_wq_wqebb_addr(&rq_desc->rq->wq, (u16)i); + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + sss_set_sge(&rqe->extend_rqe.cqe_sect.sge, rx_desc->cqe_daddr, + (sizeof(struct sss_nic_cqe) >> SSSNIC_CQE_SIZE_SHIFT)); + + rqe->extend_rqe.bd_sect.sge.len = nic_dev->rx_buff_len; + } else { + rqe->normal_rqe.cqe_lo_addr = lower_32_bits(rx_desc->cqe_daddr); + rqe->normal_rqe.cqe_hi_addr = upper_32_bits(rx_desc->cqe_daddr); + } + + rx_desc->rqe = rqe; + } +} + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 qid; + u32 pkt; + + nic_dev->get_rq_fail_cnt = 0; + for (qid = 0; qid < qp_res->qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_desc->rq = &nic_dev->nic_io->rq_group[rq_desc->qid]; + + sss_nic_init_rq_desc(rq_desc, qp_res, &qp_res->rq_res_group[qid], + &nic_dev->irq_desc_group[qid]); + + sss_nic_fill_cqe_sge(rq_desc); + + pkt = sss_nic_fill_bd_sge(rq_desc); + if (pkt == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fill rx buffer\n"); + return -ENOMEM; + } + } + + return 0; +} + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->rq_desc_group); + nic_dev->rq_desc_group = NULL; +} + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 rq_num = nic_dev->max_qp_num; + u16 i; + + nic_dev->rq_desc_group = kcalloc(rq_num, sizeof(*nic_dev->rq_desc_group), GFP_KERNEL); + if (!nic_dev->rq_desc_group) + return -ENOMEM; + + for (i = 0; i < rq_num; i++) { + rq_desc = &nic_dev->rq_desc_group[i]; + rq_desc->dev = nic_dev->dev_hdl; + rq_desc->netdev = nic_dev->netdev; + rq_desc->qid = i; + rq_desc->qid_mask = nic_dev->qp_res.rq_depth - 1; + rq_desc->q_depth = 
nic_dev->qp_res.rq_depth; + rq_desc->dma_buff_size = nic_dev->rx_dma_buff_size; + rq_desc->buff_size_shift = (u32)ilog2(nic_dev->rx_buff_len); + rq_desc->buf_len = nic_dev->rx_buff_len; + u64_stats_init(&rq_desc->stats.stats_sync); + } + + return 0; +} + +int sss_nic_update_rx_rss(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + ret = sss_nic_update_rss_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init rss\n"); + return -EFAULT; + } + } + + return 0; +} + +void sss_nic_reset_rx_rss(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) != 0) + sss_nic_reset_rss_cfg(nic_dev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h new file mode 100644 index 00000000000000..1273262c49fec8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_INIT_H +#define SSS_NIC_RX_INIT_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev); + +int sss_nic_update_rx_rss(struct sss_nic_dev *nic_dev); + +void sss_nic_reset_rx_rss(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c new file mode 100644 index 00000000000000..3720e6ec8bd6fe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rx_reset.h" + +#define SSSNIC_RQ_GET_ERR_CNT_THRESHOLD 3 +#define SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD 2 +#define SSSNIC_RQ_PRINT_CNT_THRESHOLD 3 + +static inline void sss_nic_fill_wqe_sge(struct sss_nic_rx_desc *rx_desc, + u8 wqe_type) +{ + dma_addr_t dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + struct sss_nic_rqe *rqe = rx_desc->rqe; + + if (unlikely(wqe_type == SSSNIC_EXTEND_RQ_WQE)) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + rqe->normal_rqe.bd_lo_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } +} + +static inline void sss_nic_free_wqe_buffer(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr) { + 
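+		/* drop the streaming DMA mapping first; the backing page is freed just below */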
dma_unmap_page(nic_dev->dev_hdl, rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static inline int sss_nic_fill_idle_wqe(struct sss_nic_rq_desc *rq_desc, + u32 wqebb_num, u32 start_pi) +{ + u32 pi = start_pi; + u32 i; + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + + for (i = 0; i < wqebb_num; i++) { + rx_desc = &rq_desc->rx_desc_group[pi]; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + rq_desc->reset_pi = (u16)((rq_desc->reset_pi + i) & rq_desc->qid_mask); + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + return -ENOMEM; + } + + sss_nic_fill_wqe_sge(rx_desc, rq_desc->rq->wqe_type); + + pi = (u16)((pi + 1) & rq_desc->qid_mask); + rq_desc->reset_wqe_num++; + } + + return 0; +} + +static int sss_nic_reset_rq(struct sss_nic_dev *nic_dev, u16 qid, u16 hw_ci) +{ + int ret; + u32 i; + u32 total; + u32 ci; + u32 pi; + struct sss_nic_rq_desc *rq_desc = &nic_dev->rq_desc_group[qid]; + u32 idle_wqebb = rq_desc->delta - rq_desc->reset_wqe_num; + struct sss_nic_rx_desc *rx_desc = NULL; + + if (rq_desc->delta < rq_desc->reset_wqe_num) + return -EINVAL; + + if (rq_desc->reset_wqe_num == 0) + rq_desc->reset_pi = rq_desc->pi; + + ci = rq_desc->ci & rq_desc->qid_mask; + total = ci + rq_desc->q_depth - rq_desc->pi; + if ((total % rq_desc->q_depth) != rq_desc->delta) + return -EINVAL; + + ret = sss_nic_fill_idle_wqe(rq_desc, idle_wqebb, rq_desc->reset_pi); + if (ret) + return ret; + + nic_info(nic_dev->dev_hdl, "Reset rq: rq %u, restore_buf_num:%u\n", qid, + rq_desc->reset_wqe_num); + + pi = (hw_ci + rq_desc->q_depth - 1) & rq_desc->qid_mask; + rx_desc = &rq_desc->rx_desc_group[pi]; + sss_nic_free_wqe_buffer(nic_dev, rx_desc); + + rq_desc->delta = 1; + rq_desc->reset_wqe_num = 0; + rq_desc->pi = (u16)pi; + rq_desc->backup_pi = rq_desc->pi; + rq_desc->ci = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + + for (i = 0; i < rq_desc->q_depth; i++) { + if (!SSSNIC_GET_RX_DONE(sss_hw_cpu32(rq_desc->rx_desc_group[i].cqe->state))) + continue; + + rq_desc->rx_desc_group[i].cqe->state = 0; + SSSNIC_RQ_STATS_INC(rq_desc, reset_drop_sge); + } + + ret = sss_nic_cache_out_qp_resource(nic_dev->nic_io); + if (ret) { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + return ret; + } + + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + return 0; +} + +static bool sss_nic_rq_is_normal(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_pc_info check_info) +{ + u32 status; + u32 sw_ci = rq_desc->ci & rq_desc->qid_mask; + + if (check_info.hw_pi != check_info.hw_ci || + check_info.hw_ci != rq_desc->last_hw_ci) + return true; + + if (rq_desc->stats.rx_packets != rq_desc->rx_pkts || + rq_desc->pi != rq_desc->last_sw_pi) + return true; + + status = SSSNIC_GET_RQ_CQE_STATUS(rq_desc, sw_ci); + if (SSSNIC_GET_RX_DONE(status)) + return true; + + if (sw_ci != rq_desc->last_sw_ci || rq_desc->pi != check_info.hw_pi) + return true; + + return false; +} + +void sss_nic_rq_watchdog_handler(struct work_struct *work) +{ + int ret; + u16 qid; + struct sss_nic_rq_pc_info *check_info = NULL; + struct sss_nic_rq_desc *rq_desc = NULL; + struct delayed_work *delay = to_delayed_work(work); + struct sss_nic_dev *nic_dev = container_of(delay, struct sss_nic_dev, rq_watchdog_work); + u64 size = sizeof(*check_info) * 
nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + if (!size) + return; + check_info = kzalloc(size, GFP_KERNEL); + if (!check_info) + return; + + ret = sss_nic_rq_hw_pc_info(nic_dev, check_info, nic_dev->qp_res.qp_num, + nic_dev->rq_desc_group[0].rq->wqe_type); + if (ret) { + nic_dev->get_rq_fail_cnt++; + if (nic_dev->get_rq_fail_cnt >= SSSNIC_RQ_GET_ERR_CNT_THRESHOLD) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + goto free_rq_info; + } + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + if (!sss_nic_rq_is_normal(rq_desc, check_info[qid])) { + rq_desc->check_err_cnt++; + if (rq_desc->check_err_cnt < SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD) + continue; + + if (rq_desc->print_err_cnt <= SSSNIC_RQ_PRINT_CNT_THRESHOLD) { + nic_warn(nic_dev->dev_hdl, + "Rq handle: rq(%u) wqe abnormal, hw_pi:%u, hw_ci:%u, sw_pi:%u, sw_ci:%u delta:%u\n", + qid, check_info[qid].hw_pi, check_info[qid].hw_ci, + rq_desc->pi, + rq_desc->ci & rq_desc->qid_mask, rq_desc->delta); + rq_desc->print_err_cnt++; + } + + ret = sss_nic_reset_rq(nic_dev, qid, check_info[qid].hw_ci); + if (ret) + continue; + } + + rq_desc->last_hw_ci = check_info[qid].hw_ci; + rq_desc->rx_pkts = rq_desc->stats.rx_packets; + rq_desc->last_sw_pi = rq_desc->pi; + rq_desc->last_sw_ci = rq_desc->ci & rq_desc->qid_mask; + rq_desc->print_err_cnt = 0; + rq_desc->check_err_cnt = 0; + } + + nic_dev->get_rq_fail_cnt = 0; + +free_rq_info: + kfree(check_info); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h new file mode 100644 index 00000000000000..6d588e690cca7e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_RESET_H +#define SSS_NIC_RX_RESET_H + +#include +#include +#include +#include +#include +#include + +void sss_nic_rq_watchdog_handler(struct work_struct *work); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c new file mode 100644 index 00000000000000..2bda358869df51 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" + +#define SSSNIC_DEFAULT_MSS 0x3E00 +#define SSSNIC_MIN_MSS 0x50 +#define SSSNIC_SKB_LEN_MIN 32 +#define SSSNIC_SKB_LEN_MAX 16383 +#define SSSNIC_PAYLOAD_OFFSET_MAX 221 + +#define SSSNIC_IPV4_VERSION 4 +#define SSSNIC_IPV6_VERSION 6 +#define SSSNIC_TCP_DOFF_TO_BYTES(doff) ((doff) << 2) +#define SSSNIC_VXLAN_OFFLOAD_PORT 46354 + +#define SSSNIC_TRANSPORT_OFFSET(hdr, skb) ((u32)((hdr) - (skb)->data)) + +// SQE CTRL +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_RSVD_SHIFT 18 +#define 
SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_SHIFT 19 +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_DIRECT_SHIFT 29 +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_SHIFT 30 +#define SSSNIC_SQE_CTRL_SECT_OWNER_SHIFT 31 + +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_MASK 0x3FFFFU +#define SSSNIC_SQE_CTRL_SECT_RSVD_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DIRECT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_OWNER_MASK 0x1U + +#define SSSNIC_SQE_CTRL_SECT_SET(val, member) \ +(((u32)(val) & SSSNIC_SQE_CTRL_SECT_##member##_MASK) << SSSNIC_SQE_CTRL_SECT_##member##_SHIFT) + +// SQ CTRL QINFO +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_SHIFT 2 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_SHIFT 10 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_SHIFT 11 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_SHIFT 12 +#define SSSNIC_SQE_CTRL_SECT_QINFO_MSS_SHIFT 13 +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_SHIFT 29 + +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_MASK 0x3U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_MSS_MASK 0x3FFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_MASK 0x7U + +#define SSSNIC_SQE_CTRL_SECT_QINFO_SET(val, member) \ + (((u32)(val) & SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_GET(val, member) \ + (((val) >> SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) & \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(val, member) \ + ((val) & (~(SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT))) + +// SQ TASK +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_SHIFT 19 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_SHIFT 22 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_SHIFT 24 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_SHIFT 25 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_SHIFT 26 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_SHIFT 27 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_SHIFT 28 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_SHIFT 29 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_SHIFT 30 +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_SHIFT 31 + +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_MASK 0x3U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE0_SET(val, member) \ + (((u32)(val) & 
SSSNIC_SQE_TASK_SECT_VALUE0_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE0_##member##_SHIFT) + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_SHIFT 16 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_SHIFT 19 + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_MASK 0x7U +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE3_SET(val, member) \ + (((val) & SSSNIC_SQE_TASK_SECT_VALUE3_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE3_##member##_SHIFT) + +#define SSSNIC_VLAN_INSERT_MODE_MAX 5 +#define SSSNIC_TSO_CS_EN 1 +#define SSSNIC_DEF_PKT_CNT 1 + +#define SSSNIC_SQ_STATS_INC(sq_desc, field) \ +do { \ + u64_stats_update_begin(&(sq_desc)->stats.stats_sync); \ + (sq_desc)->stats.field++; \ + u64_stats_update_end(&(sq_desc)->stats.stats_sync); \ +} while (0) + +enum sss_nic_check_tx_offload_type { + SSSNIC_OFFLOAD_TSO = BIT(0), + SSSNIC_OFFLOAD_TX_CSUM = BIT(1), + SSSNIC_OFFLOAD_TX_VLAN = BIT(2), + SSSNIC_OFFLOAD_TX_DISABLE = BIT(3), + SSSNIC_OFFLOAD_TX_ESP = BIT(4), +}; + +union sss_nic_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +struct sss_nic_sqe_ctrl_section { + u32 sect_len; + u32 qinfo; + u32 addr_high; + u32 addr_low; +}; + +/* Engine only pass first 12B TS field directly to uCode through metadata + * vlan_offoad is used for hardware when vlan insert in tx + */ +struct sss_nic_sqe_task_section { + u32 value[4]; +}; + +struct sss_nic_sqe_bd_section { + u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ + u32 rsvd; + u32 addr_high; + u32 addr_low; +}; + +/* use section pointer for support non continuous wqe */ +struct sss_nic_sqe { + struct sss_nic_sqe_ctrl_section *ctrl_sect; + struct sss_nic_sqe_task_section *task_sect; + struct sss_nic_sqe_bd_section *bd_sect0; + struct sss_nic_sqe_bd_section *bd_sect1; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +/* ************* SQ_CTRL ************** */ +enum sss_nic_sqe_data_format { + SSSNIC_NORMAL_SQE = 0, +}; + +enum sss_nic_sqe_type { + SSSNIC_SQE_COMPACT_TYPE = 0, + SSSNIC_SQE_EXTENDED_TYPE = 1, +}; + +enum sss_nic_sqe_task_len { + SSSNIC_SQE_TASK_LEN_46BITS = 0, + SSSNIC_SQE_TASK_LEN_128BITS = 1, +}; + +union sss_nic_transport_header { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum sss_nic_sq_l3_proto_type { + SSSNIC_UNSUPPORT_L3_PORTO_TYPE = 0, + SSSNIC_IPV6_PKT = 1, + SSSNIC_IPV4_PKT_NO_CSO = 2, + SSSNIC_IPV4_PKT_WITH_CSO = 3, +}; + +enum sss_nic_sq_l4_offload_type { + SSSNIC_DISABLE_OFFLOAD = 0, + SSSNIC_TCP_OFFLOAD = 1, + SSSNIC_SCTP_OFFLOAD = 2, + SSSNIC_UDP_OFFLOAD = 3, +}; + +static inline __sum16 sss_nic_csum_magic(union sss_nic_ip *ip, + unsigned short proto) +{ + return (ip->v4->version == SSSNIC_IPV4_VERSION) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +#define sss_nic_set_vlan_tx_offload(task_sect, vlan_tag, vlan_type) \ + ((task_sect)->value[3] = SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_tag), VLAN_TAG) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_type), VLAN_TYPE) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET(1U, VLAN_TAG_VALID)) + +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats) +{ + struct sss_nic_sq_stats *sq_stats = &sq_desc->stats; + unsigned int begin; + + u64_stats_update_begin(&stats->stats_sync); + do { + begin = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_bytes = sq_stats->tx_bytes; + stats->tx_packets = sq_stats->tx_packets; + stats->tx_busy = sq_stats->tx_busy; + stats->wake = sq_stats->wake; + stats->tx_dropped = sq_stats->tx_dropped; + } while (u64_stats_fetch_retry(&sq_stats->stats_sync, begin)); + u64_stats_update_end(&stats->stats_sync); +} + +#define sss_nic_init_bd_sect(bd_sect, addr, bd_len) \ +do { \ + (bd_sect)->addr_high = sss_hw_be32(upper_32_bits(addr)); \ + (bd_sect)->addr_low = sss_hw_be32(lower_32_bits(addr)); \ + (bd_sect)->len = sss_hw_be32(bd_len); \ +} while (0) + +#define sss_nic_unmap_dma_page(nic_dev, nr_frags, dma_group) \ +do { \ + struct pci_dev *_pdev = (nic_dev)->pdev; \ + int _frag_id; \ +\ + for (_frag_id = 1; _frag_id < (nr_frags) + 1; _frag_id++) \ + dma_unmap_page(&_pdev->dev, (dma_group)[_frag_id].dma, \ + (dma_group)[_frag_id].len, DMA_TO_DEVICE); \ + dma_unmap_single(&_pdev->dev, (dma_group)[0].dma, (dma_group)[0].len, \ + DMA_TO_DEVICE); \ +} while (0) + +static int sss_nic_map_dma_page(struct sss_nic_dev *nic_dev, + struct sk_buff *skb, u16 valid_nr_frag, + struct sss_nic_sq_desc *sq_desc, + struct sss_nic_tx_desc *tx_desc, + struct sss_nic_sqe *sqe) +{ + struct sss_nic_sqe_ctrl_section *ctrl_sect = sqe->ctrl_sect; + struct sss_nic_sqe_bd_section *bd_sect = sqe->bd_sect0; + struct sss_nic_dma_info *dma_group = tx_desc->dma_group; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag = NULL; + u32 flag; + int ret; + + dma_group[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[0].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + return -EFAULT; + } + + dma_group[0].len = skb_headlen(skb); + + ctrl_sect->addr_high = sss_hw_be32(upper_32_bits(dma_group[0].dma)); + ctrl_sect->addr_low = sss_hw_be32(lower_32_bits(dma_group[0].dma)); + ctrl_sect->sect_len = dma_group[0].len; + + for (flag = 0; flag < valid_nr_frag;) { + frag = &(skb_shinfo(skb)->frags[flag]); + if (unlikely(flag == sqe->first_bds_num)) + bd_sect = sqe->bd_sect1; + + flag++; + dma_group[flag].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[flag].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + flag--; + ret = -EFAULT; + goto frag_map_err; + } + dma_group[flag].len = skb_frag_size(frag); + + sss_nic_init_bd_sect(bd_sect, dma_group[flag].dma, + dma_group[flag].len); + bd_sect++; + } + return 0; + +frag_map_err: + sss_nic_unmap_dma_page(nic_dev, flag, dma_group); + return ret; +} + +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) \ +do { \ + if ((ip)->v4->version == 6) \ + (task_sect)->value[1] = be32_to_cpu(skb_shinfo(skb)->ip6_frag_id); \ +} while (0) +#else +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) do {} while (0) +#endif + 
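+/* TSO helper: picks the inner L4 offload type and payload offset. TCP seeds tcp->check with the pseudo-header sum and adds the data offset; UDP (UFO) records only the transport offset plus the IPv6 fragment id when available. */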
+#define sss_nic_get_inner_transport_info(task_sect, skb, ip, l4, l4_proto, offset, l4_offload) \ +do { \ + if ((l4_proto) == IPPROTO_TCP) { \ + (l4)->tcp->check = ~sss_nic_csum_magic((ip), IPPROTO_TCP); \ + *(l4_offload) = SSSNIC_TCP_OFFLOAD; \ + *(offset) = SSSNIC_TCP_DOFF_TO_BYTES((l4)->tcp->doff) + \ + SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } else if ((l4_proto) == IPPROTO_UDP) { \ + sss_nic_ipv6_frag_id(task_sect, (skb), (ip)); \ + *(l4_offload) = SSSNIC_UDP_OFFLOAD; \ + *(offset) = SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } \ +} while (0) + +#define sss_nic_check_enc_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + union sss_nic_ip _ip; \ + u8 _l4_proto; \ +\ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ + _ip.hdr = skb_network_header(skb); \ + if (_ip.v4->version == SSSNIC_IPV4_VERSION) { \ + _l4_proto = _ip.v4->protocol; \ + } else if (_ip.v4->version == SSSNIC_IPV6_VERSION) { \ + union sss_nic_transport_header l4; \ + unsigned char *exthdr; \ + __be16 frag_off; \ +\ + exthdr = _ip.hdr + sizeof(*_ip.v6); \ + _l4_proto = _ip.v6->nexthdr; \ + l4.hdr = skb_transport_header(skb); \ + if (l4.hdr != exthdr) \ + ipv6_skip_exthdr((skb), exthdr - (skb)->data, &_l4_proto, &frag_off); \ + } else { \ + _l4_proto = IPPROTO_RAW; \ + } \ + if (((struct udphdr *)skb_transport_header(skb))->dest != \ + SSSNIC_VXLAN_OFFLOAD_PORT || \ + _l4_proto != IPPROTO_UDP) { \ + SSSNIC_SQ_STATS_INC((sq_desc), unknown_tunnel_proto); \ + /* disable checksum offload */ \ + skb_checksum_help(skb); \ + } else { \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ +} while (0) + +#define sss_nic_check_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + if ((skb)->ip_summed == CHECKSUM_PARTIAL) {\ + if ((skb)->encapsulation) \ + sss_nic_check_enc_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else {\ + (task_sect)->value[0] |= \ + SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ + } \ +} while (0) + +#define sss_nic_get_inner_proto_type(skb, ip, l4, l4_proto) \ +do { \ + unsigned char *_ext_hdr = NULL; \ + __be16 _frag_off = 0; \ +\ + if ((ip)->v4->version == SSSNIC_IPV4_VERSION) { \ + *(l4_proto) = (ip)->v4->protocol; \ + } else if ((ip)->v4->version == SSSNIC_IPV6_VERSION) { \ + _ext_hdr = (ip)->hdr + sizeof(*((ip)->v6)); \ + *(l4_proto) = (ip)->v6->nexthdr; \ + if (_ext_hdr != (l4)->hdr) \ + ipv6_skip_exthdr((skb), (int)(_ext_hdr - (skb)->data), \ + (l4_proto), &_frag_off); \ + } else { \ + *(l4_proto) = 0; \ + } \ +} while (0) + +#define sss_nic_set_tso_info(task_sect, qinfo, l4_offload, offset, mss) \ +do { \ + if ((l4_offload) == SSSNIC_TCP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, TSO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } else if ((l4_offload) == SSSNIC_UDP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UFO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L3_EN); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((offset) >> 1, PLDOFF); \ + *(qinfo) = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(*(qinfo), MSS); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((mss), MSS); \ +} while (0) + +#define sss_nic_get_proto_hdr(task_sect, skb, ip, l4) \ +do { \ + if ((skb)->encapsulation) { \ + u32 gso_type = skb_shinfo(skb)->gso_type; \ + (task_sect)->value[0] |= 
SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L3_EN); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ +\ + (l4)->hdr = skb_transport_header(skb); \ + (ip)->hdr = skb_network_header(skb); \ +\ + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { \ + (l4)->udp->check = ~sss_nic_csum_magic((ip), IPPROTO_UDP); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L4_EN); \ + } \ +\ + (ip)->hdr = skb_inner_network_header(skb); \ + (l4)->hdr = skb_inner_transport_header(skb); \ + } else { \ + (ip)->hdr = skb_network_header(skb); \ + (l4)->hdr = skb_transport_header(skb); \ + } \ +} while (0) + +#define sss_nic_check_tso(task_sect, qinfo, skb, offload) \ +do { \ + enum sss_nic_sq_l4_offload_type _l4_offload = SSSNIC_DISABLE_OFFLOAD; \ + union sss_nic_ip _ip; \ + union sss_nic_transport_header _l4; \ + u32 _offset = 0; \ + u8 _l4_proto; \ + int _ret; \ +\ + _ret = skb_cow_head((skb), 0); \ + if (_ret < 0) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else { \ + sss_nic_get_proto_hdr((task_sect), (skb), &_ip, &_l4); \ + sss_nic_get_inner_proto_type(skb, &_ip, &_l4, &_l4_proto); \ + sss_nic_get_inner_transport_info((task_sect), (skb), &_ip, &_l4, \ + _l4_proto, &_offset, &_l4_offload); \ + sss_nic_set_tso_info((task_sect), (qinfo), _l4_offload, _offset, \ + skb_shinfo(skb)->gso_size); \ +\ + if (unlikely(SSSNIC_SQE_CTRL_SECT_QINFO_GET(*(qinfo), PLDOFF) > \ + SSSNIC_PAYLOAD_OFFSET_MAX)) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else \ + *(offload) = SSSNIC_OFFLOAD_TSO; \ + } \ +} while (0) + +#define sss_nic_check_tx_offload(sq_desc, task_sect, skb, qinfo, offload) \ +do { \ + if (skb_is_gso(skb) == 0) \ + sss_nic_check_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else \ + sss_nic_check_tso((task_sect), (qinfo), (skb), (offload)); \ +\ + if (*(offload) != SSSNIC_OFFLOAD_TX_DISABLE) { \ + if (unlikely(skb_vlan_tag_present(skb))) { \ + sss_nic_set_vlan_tx_offload((task_sect), skb_vlan_tag_get(skb), \ + (sq_desc)->qid % \ + SSSNIC_VLAN_INSERT_MODE_MAX); \ + *(offload) |= SSSNIC_OFFLOAD_TX_VLAN; \ + } \ + } \ +} while (0) + +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET +#define sss_nic_get_inner_ihs(skb) \ + (skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb)) +#else +#define sss_nic_get_inner_ihs(skb) \ + ((skb_inner_transport_header(skb) - (skb)->data) + inner_tcp_hdrlen(skb)) +#endif + +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && defined(HAVE_SK_BUFF_ENCAPSULATION)) +#define sss_nic_get_ihs(skb, ihs) \ +do { \ + if ((skb)->encapsulation) \ + (ihs) = sss_nic_get_inner_ihs(skb); \ + else \ + (ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb); \ +} while (0) +#else +#define sss_nic_get_ihs(skb, ihs) \ + ((ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb)) +#endif + +#define sss_nic_get_pkt_stats(tx_desc, skb) \ +do { \ + u32 _ihs; \ + u32 _hdr_len; \ +\ + if (skb_is_gso(skb)) { \ + sss_nic_get_ihs((skb), _ihs); \ + _hdr_len = (skb_shinfo(skb)->gso_segs - 1) * _ihs; \ + (tx_desc)->bytes = (skb)->len + (u64)_hdr_len; \ + } else { \ + (tx_desc)->bytes = (skb)->len > ETH_ZLEN ? (skb)->len : ETH_ZLEN; \ + } \ + (tx_desc)->nr_pkt_cnt = SSSNIC_DEF_PKT_CNT; \ +} while (0) + +#define sss_nic_get_sq_free_wqebbs(sq) sss_wq_free_wqebb(&(sq)->wq) + +static inline int sss_nic_check_tx_stop(struct sss_nic_sq_desc *sq_desc, + u16 wqebb_cnt) +{ + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) >= wqebb_cnt)) + return 0; + + /* We need to check again in a case another CPU has free room available. 
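Stop the queue first, then re-check the free WQEBB count: if a concurrent completion has already freed enough room, wake the queue again below instead of returning -EBUSY.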
*/ + netif_stop_subqueue(sq_desc->netdev, sq_desc->qid); + + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) < wqebb_cnt)) + return -EBUSY; + + /* wake up queue when there are enough wqebbs */ + netif_start_subqueue(sq_desc->netdev, sq_desc->qid); + + return 0; +} + +#define sss_nic_get_and_update_sq_owner(sq, owner_ptr, curr_pi, wqebb_cnt) \ +do { \ + if (unlikely((curr_pi) + (wqebb_cnt) >= (sq)->wq.q_depth)) \ + (sq)->owner = !(sq)->owner; \ + *(owner_ptr) = (sq)->owner; \ +} while (0) + +#define sss_nic_combo_sqe(sq, sqe, task, curr_pi, owner, offload, sge_cnt) \ +do { \ + void *_wqebb = NULL; \ + void *_second_part_wqebbs_addr = NULL; \ + u16 _tmp_pi; \ + u16 _first_part_wqebbs_num; \ + int _id; \ +\ + (sqe)->ctrl_sect = sss_wq_get_one_wqebb(&(sq)->wq, (curr_pi)); \ + if ((offload) == 0 && (sge_cnt) == 1) { \ + (sqe)->wqe_type = SSSNIC_SQE_COMPACT_TYPE; \ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), 1); \ + } else { \ + (sqe)->wqe_type = SSSNIC_SQE_EXTENDED_TYPE; \ +\ + if ((offload) != 0) { \ + (sqe)->task_sect = sss_wq_get_one_wqebb(&(sq)->wq, &_tmp_pi); \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_128BITS; \ +\ + for (_id = 0; _id < ARRAY_LEN((sqe)->task_sect->value); _id++) \ + (sqe)->task_sect->value[_id] = sss_hw_be32((task)->value[_id]); \ +\ + } else { \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_46BITS; \ + } \ +\ + if ((sge_cnt) > 1) { \ + /* first wqebb contain bd0, so use weqbb_cnt(sge_num-1) */ \ + _wqebb = sss_wq_get_multi_wqebb(&(sq)->wq, (sge_cnt) - 1, &_tmp_pi, \ + &_second_part_wqebbs_addr, \ + &_first_part_wqebbs_num); \ + (sqe)->first_bds_num = _first_part_wqebbs_num; \ + (sqe)->bd_sect1 = _second_part_wqebbs_addr; \ + (sqe)->bd_sect0 = _wqebb; \ + } \ +\ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), \ + (sge_cnt) + (u16)!!(offload)); \ + } \ +} while (0) + +#define SSSNIC_FILL_COMPACT_WQE_CTRL_SECT(sqe, ctrl_sect, owner) \ +do { \ + (ctrl_sect)->sect_len |= \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = 0; \ +} while (0) + +#define SSSNIC_FILL_EXTEND_WQE_CTRL_SECT(sqe, ctrl_sect, info, sge_cnt, owner) \ +do { \ + (ctrl_sect)->sect_len |= SSSNIC_SQE_CTRL_SECT_SET((sge_cnt), BUFDESC_NUM) | \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->task_type, TASKSECT_LEN) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ +\ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = (info); \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UC); \ +\ + if (!SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS)) { \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_DEFAULT_MSS, MSS); \ + } else if (SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS) < SSSNIC_MIN_MSS) { \ + /* mss should not less than 80 */ \ + (ctrl_sect)->qinfo = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR((ctrl_sect)->qinfo, MSS); \ + ctrl_sect->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_MIN_MSS, MSS); \ + } \ + (ctrl_sect)->qinfo = sss_hw_be32((ctrl_sect)->qinfo); \ +} while (0) + +#define sss_nic_init_sq_ctrl(sqe, info, sge_cnt, owner) \ +do { \ + if ((sqe)->wqe_type == SSSNIC_SQE_COMPACT_TYPE) \ + SSSNIC_FILL_COMPACT_WQE_CTRL_SECT((sqe), (sqe)->ctrl_sect, (owner)); \ + else \ + SSSNIC_FILL_EXTEND_WQE_CTRL_SECT((sqe), 
(sqe)->ctrl_sect, \ + (info), (sge_cnt), (owner)); \ +} while (0) + +#define sss_nic_rollback_sq_wqebbs(sq, wqebb_cnt, owner) \ +do { \ + if ((owner) != (sq)->owner) \ + (sq)->owner = (u8)(owner); \ + (sq)->wq.pi -= (wqebb_cnt); \ +} while (0) + +#define sss_nic_update_sq_local_ci(sq, wqebb_cnt) \ + sss_update_wq_ci(&(sq)->wq, (wqebb_cnt)) + +static netdev_tx_t sss_nic_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct sss_nic_sq_desc *sq_desc) +{ + u32 qinfo = 0; + u32 offload = 0; + u16 pi = 0; + u16 owner; + u16 sge_cnt; + u16 nr_frags = 0; + u16 wqebb_cnt; + bool find_zero_len = false; + int ret; + int frag_id; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_sqe sqe = {0}; + struct sss_nic_sqe_task_section task_sect = {0}; + + if (unlikely(skb->len < SSSNIC_SKB_LEN_MIN)) { + if (skb_pad(skb, (int)(SSSNIC_SKB_LEN_MIN - skb->len))) { + SSSNIC_SQ_STATS_INC(sq_desc, skb_pad_err); + goto tx_drop_pad_err; + } + + skb->len = SSSNIC_SKB_LEN_MIN; + } + + for (frag_id = 0; frag_id < skb_shinfo(skb)->nr_frags; frag_id++) { + if (skb_frag_size(&skb_shinfo(skb)->frags[frag_id]) == 0) { + find_zero_len = true; + continue; + } else if (find_zero_len) { + SSSNIC_SQ_STATS_INC(sq_desc, frag_size_zero); + goto tx_drop_pkts; + } + nr_frags++; + } + sge_cnt = nr_frags + 1; + wqebb_cnt = sge_cnt + 1; /* task info need 1 wqebb */ + + if (unlikely(sss_nic_check_tx_stop(sq_desc, wqebb_cnt))) { + SSSNIC_SQ_STATS_INC(sq_desc, tx_busy); + return NETDEV_TX_BUSY; + } + + sss_nic_check_tx_offload(sq_desc, &task_sect, skb, &qinfo, &offload); + if (unlikely(offload == SSSNIC_OFFLOAD_TX_DISABLE)) { + SSSNIC_SQ_STATS_INC(sq_desc, offload_err); + goto tx_drop_pkts; + } else if (offload == 0) { + /* no TS in current wqe */ + wqebb_cnt -= 1; + if (unlikely(sge_cnt == 1 && skb->len > SSSNIC_SKB_LEN_MAX)) + goto tx_drop_pkts; + } + + sss_nic_combo_sqe(sq_desc->sq, &sqe, &task_sect, &pi, &owner, offload, sge_cnt); + + tx_desc = &sq_desc->tx_desc_group[pi]; + tx_desc->nr_frags = nr_frags; + tx_desc->wqebb_cnt = wqebb_cnt; + tx_desc->skb = skb; + ret = sss_nic_map_dma_page(nic_dev, skb, nr_frags, sq_desc, tx_desc, &sqe); + if (ret != 0) { + sss_nic_rollback_sq_wqebbs(sq_desc->sq, wqebb_cnt, owner); + goto tx_drop_pkts; + } + sss_nic_get_pkt_stats(tx_desc, skb); + sss_nic_init_sq_ctrl(&sqe, qinfo, sge_cnt, owner); + sss_nic_write_db(sq_desc->sq, sq_desc->cos, SQ_CFLAG_DP, + sss_nic_get_sq_local_pi(sq_desc->sq)); + return NETDEV_TX_OK; + +tx_drop_pkts: + dev_kfree_skb_any(skb); +tx_drop_pad_err: + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + struct sss_nic_sq_desc *sq_desc = &nic_dev->sq_desc_group[qid]; + + return sss_nic_send_one_skb(skb, netdev, sq_desc); +} + +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) { + SSSNIC_STATS_TX_DROP_INC(nic_dev); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(qid >= nic_dev->qp_res.qp_num)) { + SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev); + goto out; + } + sq_desc = &nic_dev->sq_desc_group[qid]; + return sss_nic_send_one_skb(skb, netdev, sq_desc); + +out: + 
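/* Drop path for an invalid tx queue id: free the skb and account the drop against queue 0. */ +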
dev_kfree_skb_any(skb); + sq_desc = &nic_dev->sq_desc_group[0]; + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +#define sss_nic_tx_free_skb(nic_dev, tx_desc) \ +do { \ + sss_nic_unmap_dma_page((nic_dev), (tx_desc)->nr_frags, (tx_desc)->dma_group); \ + dev_kfree_skb_any((tx_desc)->skb); \ + (tx_desc)->skb = NULL; \ +} while (0) + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + u32 i; + + for (i = 0; i < sq_depth; i++) { + tx_desc = &tx_desc_group[i]; + if (tx_desc->skb) + sss_nic_tx_free_skb(nic_dev, tx_desc); + } +} + +#define sss_nic_stop_subqueue(nic_dev, sq_desc, wake) \ +do { \ + u16 _qid = (sq_desc)->sq->qid; \ + u64 _wake = 0; \ + struct netdev_queue *_netdev_sq; \ +\ + if (unlikely(__netif_subqueue_stopped((nic_dev)->netdev, _qid) && \ + sss_nic_get_sq_free_wqebbs((sq_desc)->sq) >= 1 && \ + test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags))) { \ + _netdev_sq = netdev_get_tx_queue((sq_desc)->netdev, _qid); \ +\ + __netif_tx_lock(_netdev_sq, smp_processor_id()); \ + if (__netif_subqueue_stopped((nic_dev)->netdev, _qid)) { \ + netif_wake_subqueue((nic_dev)->netdev, _qid); \ + _wake++; \ + } \ + __netif_tx_unlock(_netdev_sq); \ + } \ +\ + *(wake) = _wake; \ +} while (0) + +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, int budget) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(sq_desc->netdev); + u64 tx_byte_cnt = 0; + u64 nr_pkt_cnt = 0; + u64 wake = 0; + u16 sw_ci; + u16 hw_ci; + u16 wqebb_cnt = 0; + int pkt_cnt = 0; + + hw_ci = sss_nic_get_sq_hw_ci(sq_desc->sq); + dma_rmb(); + sw_ci = sss_nic_get_sq_local_ci(sq_desc->sq); + + do { + tx_desc = &sq_desc->tx_desc_group[sw_ci]; + + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & sq_desc->qid_mask) < tx_desc->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_desc->wqebb_cnt) & (u16)sq_desc->qid_mask; + prefetch(&sq_desc->tx_desc_group[sw_ci]); + + tx_byte_cnt += tx_desc->bytes; + nr_pkt_cnt += tx_desc->nr_pkt_cnt; + wqebb_cnt += tx_desc->wqebb_cnt; + pkt_cnt++; + + sss_nic_tx_free_skb(nic_dev, tx_desc); + } while (likely(pkt_cnt < budget)); + + sss_nic_update_sq_local_ci(sq_desc->sq, wqebb_cnt); + + sss_nic_stop_subqueue(nic_dev, sq_desc, &wake); + + u64_stats_update_begin(&sq_desc->stats.stats_sync); + sq_desc->stats.tx_bytes += tx_byte_cnt; + sq_desc->stats.tx_packets += nr_pkt_cnt; + sq_desc->stats.wake += wake; + u64_stats_update_end(&sq_desc->stats.stats_sync); + + return pkt_cnt; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h new file mode 100644 index 00000000000000..faeca6a9368589 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_H +#define SSS_NIC_TX_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group); +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats); +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, int budget); + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c new file mode 100644 index 00000000000000..89b032ed177c1c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" + +#define SSSNIC_SQ_EXTRA_SGE 18 + +#define SSSNIC_FLUSH_SQ_TIMEOUT 1000 + +#define SSSNIC_STOP_SQ_WAIT_TIME_MIN 900 +#define SSSNIC_STOP_SQ_WAIT_TIME_MAX 1000 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN 9900 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX 10000 + +#define SSSNIC_SQ_WQEBB_BD (SSSNIC_SQ_WQEBB_SIZE / 16) + +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + int qid; + int id; + u64 bds_size; + u64 len; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + bds_size = sizeof(*sq_res->dma_group) * + (qp_res->sq_depth * SSSNIC_SQ_WQEBB_BD + SSSNIC_SQ_EXTRA_SGE); + sq_res->dma_group = kzalloc(bds_size, GFP_KERNEL); + if (!sq_res->dma_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to allocate sq %d dma info\n", qid); + goto error; + } + + len = sizeof(*sq_res->tx_desc_group) * qp_res->sq_depth; + sq_res->tx_desc_group = kzalloc(len, GFP_KERNEL); + if (!sq_res->tx_desc_group) { + kfree(sq_res->dma_group); + sq_res->dma_group = NULL; + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq %d tx desc\n", qid); + goto error; + } + } + + return 0; + +error: + for (id = 0; id < qid; id++) { + sq_res = &qp_res->sq_res_group[id]; + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } + + return -ENOMEM; +} + +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + u16 qid; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + + sss_nic_free_all_skb(nic_dev, qp_res->sq_depth, sq_res->tx_desc_group); + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } +} + +void sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + struct sss_nic_sq_desc *sq_desc = NULL; + u16 qid; + u32 did; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_res = &qp_res->sq_res_group[qid]; + + sq_desc->q_depth = qp_res->sq_depth; + sq_desc->qid_mask = qp_res->sq_depth - 1; + + sq_desc->tx_desc_group = sq_res->tx_desc_group; + for (did = 0; did < qp_res->sq_depth; did++) + sq_desc->tx_desc_group[did].dma_group = + &sq_res->dma_group[did * SSSNIC_SQ_WQEBB_BD]; + + sq_desc->sq = &nic_dev->nic_io->sq_group[qid]; + } +} + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + u16 
sq_num = nic_dev->max_qp_num; + u16 qid; + + nic_dev->sq_desc_group = kcalloc(sq_num, sizeof(*nic_dev->sq_desc_group), GFP_KERNEL); + if (!nic_dev->sq_desc_group) + return -ENOMEM; + + for (qid = 0; qid < sq_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + sq_desc->qid = qid; + sq_desc->dev = nic_dev->dev_hdl; + sq_desc->netdev = nic_dev->netdev; + sq_desc->qid_mask = nic_dev->qp_res.sq_depth - 1; + sq_desc->q_depth = nic_dev->qp_res.sq_depth; + u64_stats_init(&sq_stats->stats_sync); + } + + return 0; +} + +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->sq_desc_group); + nic_dev->sq_desc_group = NULL; +} + +static bool sss_nic_sq_is_null(struct sss_nic_io_queue *sq) +{ + u16 sw_pi = sss_nic_get_sq_local_pi(sq); + u16 hw_ci = sss_nic_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +static int sss_nic_stop_sq(struct sss_nic_dev *nic_dev, u16 qid) +{ + int ret; + unsigned long timeout; + struct sss_nic_io_queue *sq = nic_dev->sq_desc_group[qid].sq; + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_MIN, SSSNIC_STOP_SQ_WAIT_TIME_MAX); + } while (time_before(jiffies, timeout)); + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + ret = sss_nic_force_drop_tx_pkt(nic_dev); + if (ret != 0) + break; + + usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN, + SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX); + } while (time_before(jiffies, timeout)); + + if (!sss_nic_sq_is_null(sq)) + return -EFAULT; + + return 0; +} + +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev) +{ + u16 qid = 0; + int ret = 0; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + ret = sss_nic_stop_sq(nic_dev, qid); + if (ret != 0) + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to stop sq%u\n", qid); + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h new file mode 100644 index 00000000000000..c72af131707ebe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_INIT_H +#define SSS_NIC_TX_INIT_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev); +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev); +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c new file mode 100644 index 00000000000000..1c585ad7a15f66 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include 
"sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +static u8 vf_link_state; +module_param(vf_link_state, byte, 0444); +MODULE_PARM_DESC(vf_link_state, + "Set vf link state, 0 - link auto, 1 - always link up, 2 - always link down. - default is 0."); + +/* In order to adapt different linux version */ +enum { + SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_ENABLE, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, + SSSNIC_IFLA_VF_LINK_STATE_MAX +}; + +#define SSSNIC_CVLAN_INSERT_ENABLE 0x1 +#define SSSNIC_QINQ_INSERT_ENABLE 0X3 + +#define SSSNIC_GET_VLAN_TAG(vlan_id, qos) ((vlan_id) + (u16)((qos) << VLAN_PRIO_SHIFT)) + +typedef void (*sss_nic_link_vf_handler_t)(struct sss_nic_vf_info *); +typedef u8 (*sss_nic_link_state_handler_t)(struct sss_nic_io *nic_io, u16 vf_id); + +static int sss_nic_set_vlan_mode(struct sss_nic_io *nic_io, u16 func_id, + u16 vlan_tag, u16 qid, u32 vlan_mode) +{ + int ret; + u64 out_param = 0; + struct sss_nic_vlan_ctx *vlan_ctx = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate send buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*vlan_ctx); + vlan_ctx = (struct sss_nic_vlan_ctx *)msg_buf->buf; + vlan_ctx->sel = 0; /* TPID0 in IPSU */ + vlan_ctx->func_id = func_id; + vlan_ctx->mode = vlan_mode; + vlan_ctx->qid = qid; + vlan_ctx->tag = vlan_tag; + + sss_cpu_to_be32(vlan_ctx, sizeof(*vlan_ctx)); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, msg_buf, + &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Fail to set vlan ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return 0; +} + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vlan_id, u8 qos, int vf_id) +{ + int ret; + u32 vlan_mode; + u16 os_id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_tag = SSSNIC_GET_VLAN_TAG(vlan_id, qos); + u16 func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + struct sss_nic_mbx_vf_vlan_cfg cmd_config_info = {0}; + u16 out_len = sizeof(cmd_config_info); + + if (vlan_id == 0 && opcode == SSSNIC_MBX_OPCODE_DEL) + return 0; + + cmd_config_info.vlan_id = vlan_id; + cmd_config_info.func_id = func_id; + cmd_config_info.opcode = opcode; + cmd_config_info.qos = qos; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + if (ret != 0 || out_len == 0 || cmd_config_info.head.state != SSS_MGMT_CMD_SUCCESS) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan, ret: %d, status: 0x%x, out_len: 0x%x\n", + os_id, ret, cmd_config_info.head.state, out_len); + return -EFAULT; + } + + vlan_mode = (opcode == SSSNIC_MBX_OPCODE_ADD) ? + SSSNIC_QINQ_INSERT_ENABLE : SSSNIC_CVLAN_INSERT_ENABLE; + + ret = sss_nic_set_vlan_mode(nic_io, func_id, vlan_tag, + SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX, vlan_mode); + if (ret != 0) { + cmd_config_info.opcode = (opcode == SSSNIC_MBX_OPCODE_DEL) ? 
+ SSSNIC_MBX_OPCODE_ADD : SSSNIC_MBX_OPCODE_DEL; + sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan context, ret: %d\n", os_id, ret); + } + + return ret; +} + +int sss_nic_create_vf_vlan(struct sss_nic_io *nic_io, int vf_id, u16 vlan, u8 qos) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, vlan, qos, vf_id); + if (ret != 0) + return ret; + + nic_io->vf_info_group[id].pf_qos = qos; + nic_io->vf_info_group[id].pf_vlan = vlan; + + nic_info(nic_io->dev_hdl, "Add vf vlan VLAN %u, QOS 0x%x on VF %d\n", + vlan, qos, id); + + return 0; +} + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info_group; + + vf_info_group = nic_io->vf_info_group; + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_DEL, + vf_info_group[id].pf_vlan, + vf_info_group[id].pf_qos, vf_id); + if (ret != 0) + return ret; + + nic_info(nic_io->dev_hdl, "Kill vf VLAN %u on VF %d\n", + vf_info_group[id].pf_vlan, id); + + vf_info_group[id].pf_qos = 0; + vf_info_group[id].pf_vlan = 0; + + return 0; +} + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_prio; + u16 pf_vlan; + u8 pf_qos; + + pf_vlan = nic_io->vf_info_group[id].pf_vlan; + pf_qos = nic_io->vf_info_group[id].pf_qos; + + vlan_prio = SSSNIC_GET_VLAN_PRIO(pf_vlan, pf_qos); + + return vlan_prio; +} + +static u8 sss_nic_ifla_vf_link_state_auto(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = false; + nic_io->vf_info_group[id].link_up = !!nic_io->link_status; + + return nic_io->link_status; +} + +static u8 sss_nic_ifla_vf_link_state_enable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; + nic_io->vf_info_group[id].link_up = true; + + return SSSNIC_LINK_UP; +} + +static u8 sss_nic_ifla_vf_link_state_disable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; + nic_io->vf_info_group[id].link_up = false; + + return SSSNIC_LINK_DOWN; +} + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link) +{ + u8 link_status = 0; + struct sss_nic_vf_info *vf_info = NULL; + + sss_nic_link_state_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_ifla_vf_link_state_auto, + sss_nic_ifla_vf_link_state_enable, + sss_nic_ifla_vf_link_state_disable, + }; + + if (link >= SSSNIC_IFLA_VF_LINK_STATE_MAX) + return -EINVAL; + + if (handler[link]) + link_status = handler[link](nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); + + /* Notify the VF of its new link state */ + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->attach) + sss_nic_notify_vf_link_state(nic_io, vf_id, link_status); + + return 0; +} + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_mbx_set_spoofchk cmd_spoofchk_cfg = {0}; + u16 out_len = sizeof(cmd_spoofchk_cfg); + + cmd_spoofchk_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + cmd_spoofchk_cfg.state = !!spoofchk; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + &cmd_spoofchk_cfg, + sizeof(cmd_spoofchk_cfg), &cmd_spoofchk_cfg, + 
&out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_spoofchk_cfg)) { + nic_err(nic_io->dev_hdl, "Fail to set VF(%d) spoofchk, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, ret, cmd_spoofchk_cfg.head.state, out_len); + ret = -EINVAL; + } + + vf_info = nic_io->vf_info_group; + vf_info[id].spoofchk = !!spoofchk; + + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool trust) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + nic_io->vf_info_group[id].trust = !!trust; + + return 0; +} + +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + return !!nic_io->vf_info_group[id].trust; +} +#endif + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_tx_rate_cfg cmd_cfg = {0}; + u16 out_len = sizeof(cmd_cfg); + + cmd_cfg.min_rate = min_rate; + cmd_cfg.max_rate = max_rate; + cmd_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &cmd_cfg, sizeof(cmd_cfg), &cmd_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d max_rate %u, min_rate %u, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, max_rate, min_rate, ret, cmd_cfg.head.state, + out_len); + return -EIO; + } + + nic_io->vf_info_group[id].max_rate = max_rate; + nic_io->vf_info_group[id].min_rate = min_rate; + + return 0; +} + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_vf_info *vf_info; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ether_addr_copy(ifla_vf->mac, vf_info->user_mac); + ifla_vf->vf = SSSNIC_HW_VF_ID_TO_OS(vf_id); + ifla_vf->qos = vf_info->pf_qos; + ifla_vf->vlan = vf_info->pf_vlan; + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ifla_vf->spoofchk = vf_info->spoofchk; +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + ifla_vf->trusted = vf_info->trust; +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ifla_vf->min_tx_rate = vf_info->min_rate; + ifla_vf->max_tx_rate = vf_info->max_rate; +#else + ifla_vf->tx_rate = vf_info->max_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (!vf_info->link_forced) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf_info->link_up) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ifla_vf->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +} + +static void sss_nic_init_link_disable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = false; +} + +static void sss_nic_init_link_enable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = true; +} + +static void sss_nic_init_link_auto_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = false; +} + +static int sss_nic_init_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u8 link_state; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + sss_nic_link_vf_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_init_link_auto_vf, + sss_nic_init_link_enable_vf, + sss_nic_init_link_disable_vf + }; + + if (vf_link_state >= SSSNIC_IFLA_VF_LINK_STATE_MAX) { + vf_link_state = 
SSSNIC_IFLA_VF_LINK_STATE_AUTO; + nic_warn(nic_io->dev_hdl, "Invalid vf_link_state: %u out of range[%u - %u], adjust to %d\n", + vf_link_state, SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, SSSNIC_IFLA_VF_LINK_STATE_AUTO); + } + + link_state = vf_link_state; + if (link_state < SSSNIC_IFLA_VF_LINK_STATE_MAX) { + handler[link_state](&vf_info_group[vf_id]); + } else { + nic_err(nic_io->dev_hdl, "Fail to input vf_link_state: %u\n", + link_state); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_register_vf_to_hw(struct sss_nic_io *nic_io) +{ + u16 out_len; + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + + cmd_register_info.op_register = 1; + out_len = sizeof(cmd_register_info); + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), + &cmd_register_info, &out_len, 0, + SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) { + nic_err(nic_io->dev_hdl, "Fail to register VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +static void sss_nic_unregister_vf_to_hw(struct sss_nic_io *nic_io) +{ + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + u16 out_len = sizeof(cmd_register_info); + + cmd_register_info.op_register = 0; + + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), &cmd_register_info, + &out_len, 0, SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) + nic_err(nic_io->dev_hdl, + "Fail to unregister VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); +} + +static void sss_nic_vf_unregister(struct sss_nic_io *nic_io) +{ + sss_nic_unregister_vf_to_hw(nic_io); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); +} + +static int sss_nic_vf_register(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_vf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_vf_mag_event_handler); + if (ret != 0) + goto reg_cb_error; + + ret = sss_nic_register_vf_to_hw(nic_io); + if (ret != 0) + goto register_vf_error; + + return 0; + +register_vf_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +reg_cb_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io) +{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return; + kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; +} + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + int ret; + u32 len; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_io->max_vf_num = sss_get_max_vf_num(nic_io->hwdev); + if (nic_io->max_vf_num == 0) + return 0; + + len = sizeof(*nic_io->vf_info_group) * nic_io->max_vf_num; + nic_io->vf_info_group = kzalloc(len, GFP_KERNEL); + if (!nic_io->vf_info_group) + return -ENOMEM; + + for (i = 0; i < nic_io->max_vf_num; i++) { + ret = sss_nic_init_vf_info(nic_io, i); + if (ret != 0) + goto init_vf_info_error; + } + + return 0; + +init_vf_info_error: + 
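/* Unwind on failure: release the partially initialized VF info array. */ +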
kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; + + return ret; +} + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io) +{ + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return sss_nic_vf_register(nic_io); + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_event_handler); + if (ret != 0) + goto register_pf_mag_event_handler; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_mbx_handler); + if (ret != 0) + goto register_pf_mbx_cb_error; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_mbx_handler); + if (ret != 0) + goto register_pf_mag_mbx_cb_error; + + return 0; + +register_pf_mag_mbx_cb_error: + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + +register_pf_mbx_cb_error: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +register_pf_mag_event_handler: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io) +{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) { + sss_nic_vf_unregister(nic_io); + } else { + if (nic_io->vf_info_group) { + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } +} + +static void sss_nic_clear_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 func_id; + struct sss_nic_vf_info *vf_info; + + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (vf_info->specified_mac) + sss_nic_del_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + + if (sss_nic_vf_info_vlan_prio(nic_io, vf_id)) + sss_nic_destroy_vf_vlan(nic_io, vf_id); + + if (vf_info->max_rate && SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + sss_nic_set_vf_tx_rate_limit(nic_io, vf_id, 0, 0); + + if (vf_info->spoofchk) + sss_nic_set_vf_spoofchk(nic_io, vf_id, false); + +#ifdef HAVE_NDO_SET_VF_TRUST + if (vf_info->trust) + sss_nic_set_vf_trust(nic_io, vf_id, false); +#endif + + memset(vf_info, 0, sizeof(*vf_info)); + sss_nic_init_vf_info(nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); +} + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + + for (i = 0; i < nic_io->max_vf_num; i++) + sss_nic_clear_vf_info(nic_io, SSSNIC_OS_VF_ID_TO_HW(i)); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h new file mode 100644 index 00000000000000..4256e118558e12 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_VF_CFG_H +#define SSS_NIC_VF_CFG_H + +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_io_define.h" + +#define SSSNIC_GET_VF_SPOOFCHK(nic_io, vf_id) \ + (!!(nic_io)->vf_info_group[vf_id].spoofchk) + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk); + +int sss_nic_create_vf_vlan(struct sss_nic_io 
*nic_io, int vf_id, u16 vlan, u8 qos); + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id); + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id); + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate); + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf); + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link); + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io); + +#ifdef HAVE_NDO_SET_VF_TRUST +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id); +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool trust); +#endif + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vid, + u8 qos, int vf_id); + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io); + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io); + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io); + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h new file mode 100644 index 00000000000000..4aa2a966755632 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_H +#define SSS_TOOL_NIC_H + +#define SSS_TOOL_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ + +#define SSS_TOOL_MSG_QOS_DEV_TRUST BIT(0) +#define SSS_TOOL_MSG_QOS_DEV_DFT_COS BIT(1) +#define SSS_TOOL_MSG_QOS_DEV_PCP2COS BIT(2) +#define SSS_TOOL_MSG_QOS_DEV_DSCP2COS BIT(3) + +struct sss_tool_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +struct sss_tool_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct sss_tool_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct sss_tool_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + u32 q_depth; + u16 pi_reverse; /* TODO: what is this? 
*/ + u16 wqebb_size; + u8 priority; + u16 *ci_addr; + u64 cla_addr; + void *slq_handle; + /* TODO: NIC doesn't use direct wqe */ + struct sss_tool_hw_page direct_wqe; + struct sss_tool_hw_page doorbell; + u32 page_idx; + u32 glb_sq_id; +}; + +struct sss_tool_rq_info { + u16 q_id; + u16 delta; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u8 coalesc_timer_cfg; + u8 pending_limt; + u16 msix_idx; + u32 msix_vector; +}; + +struct sss_tool_msg_head { + u8 status; + u8 rsvd1[3]; +}; + +struct sss_tool_dcb_state { + struct sss_tool_msg_head head; + + u16 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 rsvd; +}; + +struct sss_tool_qos_dev_cfg { + struct sss_tool_msg_head head; + + u8 op_code; /* 0:get 1: set */ + u8 rsvd0; + u16 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos, bit2 - pcp2cos, bit3 - dscp2cos */ + + u8 trust; /* 0 - pcp, 1 - dscp */ + u8 dft_cos; + u16 rsvd1; + u8 pcp2cos[8]; /* all 8 entries must be configured together */ + + /* When configuring dscp2cos, if a cos value is set to 0xFF, */ + /* the driver ignores the mapping for that dscp priority. */ + /* Multiple dscp-to-cos mappings may be configured in one call. */ + u8 dscp2cos[64]; + u32 rsvd2[4]; +}; + +struct sss_tool_qos_cos_cfg { + struct sss_tool_msg_head head; + + u8 port_id; + u8 func_cos_bitmap; + u8 port_cos_bitmap; + u8 func_max_cos_num; + u32 rsvd2[4]; +}; + +#endif /* SSS_TOOL_NIC_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c new file mode 100644 index 00000000000000..7e3ea19b74d1b9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_cfg.h" +#include "sss_nic_dcb.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_tool_nic_dcb.h" + +#define SSS_TOOL_DBG_DFLT_DSCP_VAL 0xFF + +static int sss_tool_update_pcp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + u8 valid_cos_bitmap = 0; + u8 cos_num = 0; + int i; + + if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_PCP2COS)) + return 0; + + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) { + if (!(nic_dev->dft_func_cos_bitmap & BIT(qos_cfg->pcp2cos[i]))) { + tool_err("Invalid pcp cos:%u, func cos valid map is %u", + qos_cfg->pcp2cos[i], nic_dev->dft_func_cos_bitmap); + return -EINVAL; + } + + if ((BIT(qos_cfg->pcp2cos[i]) & valid_cos_bitmap) == 0) { + cos_num++; + valid_cos_bitmap |= (u8)BIT(qos_cfg->pcp2cos[i]); + } + } + + nic_dev->backup_dcb_cfg.pcp_valid_cos_map = valid_cos_bitmap; + nic_dev->backup_dcb_cfg.pcp_user_cos_num = cos_num; + memcpy(nic_dev->backup_dcb_cfg.pcp2cos, qos_cfg->pcp2cos, sizeof(qos_cfg->pcp2cos)); + + return 0; +} + +static int sss_tool_update_dscp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + u8 valid_cos_bitmap = 0; + u8 cos_num = 0; + u8 cos; + int i; + + if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DSCP2COS)) + return 0; + + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) { + if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL) + cos = qos_cfg->dscp2cos[i]; + else + cos = nic_dev->backup_dcb_cfg.dscp2cos[i]; + + if (cos >= SSSNIC_DCB_UP_MAX || !(nic_dev->dft_func_cos_bitmap & BIT(cos))) { + tool_err("Invalid dscp cos:%u, func cos valid map is %u", + cos, 
nic_dev->dft_func_cos_bitmap); + return -EINVAL; + } + + if ((BIT(cos) & valid_cos_bitmap) == 0) { + cos_num++; + valid_cos_bitmap |= (u8)BIT(cos); + } + } + + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) { + if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL) + nic_dev->backup_dcb_cfg.dscp2cos[i] = qos_cfg->dscp2cos[i]; + else + nic_dev->backup_dcb_cfg.dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + } + + nic_dev->backup_dcb_cfg.dscp_valid_cos_map = valid_cos_bitmap; + nic_dev->backup_dcb_cfg.dscp_user_cos_num = cos_num; + + return 0; +} + +static int sss_tool_update_pcp_dscp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + int ret; + + ret = sss_tool_update_pcp_cfg(nic_dev, qos_cfg); + if (ret != 0) { + tool_err("Fail to update pcp cfg\n"); + return ret; + } + + ret = sss_tool_update_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + tool_err("Fail to update dscp cfg\n"); + + return ret; +} + +static int sss_tool_update_wanted_qos_cfg(struct sss_nic_dev *nic_dev, + const void *in_buf) +{ + const struct sss_tool_qos_dev_cfg *qos_cfg = in_buf; + u8 valid_cos_bitmap; + u8 cos_num; + int ret; + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_TRUST) { + if (qos_cfg->trust > DCB_DSCP) { + tool_err("Invalid trust:%u of qos cfg\n", qos_cfg->trust); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.trust = qos_cfg->trust; + } + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DFT_COS) { + if (!(BIT(qos_cfg->dft_cos) & nic_dev->dft_func_cos_bitmap)) { + tool_err("Invalid default cos:%u of qos cfg\n", qos_cfg->dft_cos); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.default_cos = qos_cfg->dft_cos; + } + + ret = sss_tool_update_pcp_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + return ret; + + if (nic_dev->backup_dcb_cfg.trust != DCB_PCP) { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.dscp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.dscp_user_cos_num; + } else { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.pcp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.pcp_user_cos_num; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + if (cos_num > nic_dev->qp_res.qp_num) { + tool_err("Invalid cos num, DCB is on, cos num:%d need less than channel num:%u\n", + cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + } + + if (!(BIT(nic_dev->backup_dcb_cfg.default_cos) & valid_cos_bitmap)) { + tool_info("Success to update cos %u to %u\n", + nic_dev->backup_dcb_cfg.default_cos, (u8)fls(valid_cos_bitmap) - 1); + nic_dev->backup_dcb_cfg.default_cos = (u8)fls(valid_cos_bitmap) - 1; + } + + return 0; +} + +static int sss_tool_set_tx_cos_state(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 i; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + + dcb_info.trust = dcb_cfg->trust; + dcb_info.default_cos = dcb_cfg->default_cos; + dcb_info.dcb_on = dcb_en; + + if (!dcb_en) { + memset(dcb_info.dscp2cos, dcb_cfg->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_cfg->default_cos, sizeof(dcb_info.pcp2cos)); + + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + dcb_info.dscp2cos[i] = dcb_cfg->dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + dcb_info.pcp2cos[i] = dcb_cfg->pcp2cos[i]; + } + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + tool_err("Fail to set dcb state\n"); + + return ret; +} + +static int sss_tool_configure_dcb_hw(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 user_cos_num = 
sss_nic_get_user_cos_num(nic_dev); + + ret = sss_nic_set_hw_dcb_state(nic_dev, 1, dcb_en); + if (ret != 0) { + tool_err("Fail to set dcb state\n"); + return ret; + } + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + sss_nic_update_sq_cos(nic_dev, dcb_en); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + /* VF does not support DCB, use the default cos */ + nic_dev->hw_dcb_cfg.default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + return 0; + } + + ret = sss_tool_set_tx_cos_state(nic_dev, dcb_en); + if (ret != 0) { + tool_err("Fail to set tx cos state\n"); + goto set_tx_cos_fail; + } + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + tool_err("Fail to configure rx\n"); + goto update_rx_rss_fail; + } + + if (!dcb_en) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + else + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + return 0; +update_rx_rss_fail: + sss_tool_set_tx_cos_state(nic_dev, dcb_en ? 0 : 1); + +set_tx_cos_fail: + sss_nic_update_sq_cos(nic_dev, dcb_en ? 0 : 1); + sss_nic_set_hw_dcb_state(nic_dev->hwdev, 1, dcb_en ? 0 : 1); + + return ret; +} + +static int sss_tool_setup_cos(struct net_device *netdev, u8 cos) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (cos > nic_dev->max_cos_num) { + tool_err("Invalid num_tc: %u more then max cos: %u\n", cos, nic_dev->max_cos_num); + return -EINVAL; + } + + if (cos && SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) { + tool_err("Fail to enable DCB while Symmetric RSS is enabled\n"); + return -EOPNOTSUPP; + } + + return sss_tool_configure_dcb_hw(nic_dev, cos ? 1 : 0); +} + +static void sss_tool_change_qos_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_cfg) +{ + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + sss_nic_sync_dcb_cfg(nic_dev, dcb_cfg); + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + + clear_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); +} + +static int sss_tool_dcbcfg_set_up_bitmap(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_nic_dcb_config old_dcb_cfg; + bool netif_run = false; + + memcpy(&old_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(struct sss_nic_dcb_config)); + + if (!memcmp(&nic_dev->backup_dcb_cfg, &old_dcb_cfg, sizeof(struct sss_nic_dcb_config))) { + tool_info("Valid up bitmap is the same, nothing has to change\n"); + return 0; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + netif_run = true; + } + + if (test_and_set_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + tool_warn("Cos up map setup in inprocess, please try again later\n"); + ret = -EFAULT; + goto set_qos_cfg_fail; + } + + sss_tool_change_qos_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + ret = sss_tool_setup_cos(nic_dev->netdev, user_cos_num); + if (ret != 0) + goto setup_cos_fail; + } + + if (netif_run) { + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_up_fail; + } + + rtnl_unlock(); + + return 0; + +vport_up_fail: + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_tool_setup_cos(nic_dev->netdev, user_cos_num ? 
0 : user_cos_num); + +setup_cos_fail: + sss_tool_change_qos_cfg(nic_dev, &old_dcb_cfg); + +set_qos_cfg_fail: + if (netif_run) + sss_nic_vport_up(nic_dev); + + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 i; + struct sss_tool_qos_dev_cfg *qos_out = out_buf; + + if (!out_buf || !out_len || !in_buf) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*qos_out) || *out_len != sizeof(*qos_out)) { + tool_err("Invalid in len: %u or outlen: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*qos_out)); + return -EINVAL; + } + + memcpy(qos_out, in_buf, sizeof(*qos_out)); + qos_out->head.status = 0; + if (qos_out->op_code & SSS_TOOL_DCB_OPCODE_WR) { + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, + sizeof(struct sss_nic_dcb_config)); + ret = sss_tool_update_wanted_qos_cfg(nic_dev, in_buf); + if (ret != 0) { + qos_out->head.status = SSS_TOOL_EINVAL; + return 0; + } + + ret = sss_tool_dcbcfg_set_up_bitmap(nic_dev); + if (ret != 0) + qos_out->head.status = SSS_TOOL_EIO; + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + qos_out->dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) + qos_out->pcp2cos[i] = nic_dev->hw_dcb_cfg.pcp2cos[i]; + qos_out->trust = nic_dev->hw_dcb_cfg.trust; + qos_out->dft_cos = nic_dev->hw_dcb_cfg.default_cos; + } + + return 0; +} + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_tool_dcb_state *dcb_out = out_buf; + const struct sss_tool_dcb_state *dcb_in = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*dcb_in) || *out_len != sizeof(*dcb_out)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*dcb_in)); + return -EINVAL; + } + + memcpy(dcb_out, dcb_in, sizeof(*dcb_in)); + dcb_out->head.status = 0; + + if (!(dcb_in->op_code & SSS_TOOL_DCB_OPCODE_WR)) { + dcb_out->state = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + return 0; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE) == dcb_in->state) + return 0; + + if (dcb_in->state && user_cos_num > nic_dev->qp_res.qp_num) { + tool_err("Fail to mt dcb state, cos num %u larger than channel num %u\n", + user_cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? user_cos_num : 0); + if (ret != 0) { + sss_nic_vport_up(nic_dev); + rtnl_unlock(); + return ret; + } + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) { + sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 0 : user_cos_num); + sss_nic_vport_up(nic_dev); + } + + rtnl_unlock(); + return ret; + } + + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 
user_cos_num : 0); + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_qos_cos_cfg *out_cfg = out_buf; + const struct sss_tool_qos_cos_cfg *in_cfg = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*in_cfg) || *out_len != sizeof(*out_cfg)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*in_cfg)); + return -EINVAL; + } + + memcpy(out_cfg, in_cfg, sizeof(*in_cfg)); + out_cfg->func_max_cos_num = nic_dev->max_cos_num; + out_cfg->head.status = 0; + out_cfg->port_cos_bitmap = (u8)nic_dev->dft_port_cos_bitmap; + out_cfg->func_cos_bitmap = (u8)nic_dev->dft_func_cos_bitmap; + out_cfg->port_id = sss_get_phy_port_id(nic_dev->hwdev); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h new file mode 100644 index 00000000000000..1fc71d5a65f8f4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_DCB_H +#define SSS_TOOL_NIC_DCB_H + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c new file mode 100644 index 00000000000000..08b9e8ae27c4ad --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_mag_cfg.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_tool_nic_dcb.h" +#include "sss_tool_nic_qp_info.h" +#include "sss_tool_nic_phy_attr.h" +#include "sss_tool_nic_stats.h" +#include "sss_tool_nic_func.h" + +typedef int (*sss_tool_cmd_func)(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +struct sss_tool_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_cmd_func func; +}; + +static int sss_tool_get_nic_version(void *out_buf, const u32 *out_len) +{ + struct sss_tool_drv_version_info *ver_info = out_buf; + int ret; + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*ver_info)) { + tool_err("Invalid out len :%u is not equal to %lu\n", + *out_len, sizeof(*ver_info)); + return -EINVAL; + } + + ret = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", + SSSNIC_DRV_VERSION, __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static const struct sss_tool_cmd_handle sss_tool_nic_cmd_handle[] = { + {SSS_TOOL_GET_TX_INFO, sss_tool_get_tx_info}, + {SSS_TOOL_GET_RX_INFO, sss_tool_get_rx_info}, + {SSS_TOOL_GET_TX_WQE_INFO, sss_tool_get_tx_wqe_info}, + {SSS_TOOL_GET_RX_WQE_INFO, sss_tool_get_rx_wqe_info}, + {SSS_TOOL_GET_Q_NUM, 
sss_tool_get_q_num}, + {SSS_TOOL_GET_RX_CQE_INFO, sss_tool_get_rx_cqe_info}, + {SSS_TOOL_GET_INTER_NUM, sss_tool_get_inter_num}, + {SSS_TOOL_SET_PF_BW_LIMIT, sss_tool_set_pf_bw_limit}, + {SSS_TOOL_GET_PF_BW_LIMIT, sss_tool_get_pf_bw_limit}, + {SSS_TOOL_GET_LOOPBACK_MODE, sss_tool_get_loopback_mode}, + {SSS_TOOL_SET_LOOPBACK_MODE, sss_tool_set_loopback_mode}, + {SSS_TOOL_GET_TX_TIMEOUT, sss_tool_get_netdev_tx_timeout}, + {SSS_TOOL_SET_TX_TIMEOUT, sss_tool_set_netdev_tx_timeout}, + {SSS_TOOL_GET_SSET_COUNT, sss_tool_get_sset_count}, + {SSS_TOOL_GET_SSET_ITEMS, sss_tool_get_sset_stats}, + {SSS_TOOL_GET_XSFP_PRESENT, sss_tool_get_xsfp_present}, + {SSS_TOOL_GET_XSFP_INFO, sss_tool_get_xsfp_info}, + {SSS_TOOL_GET_ULD_DEV_NAME, sss_tool_get_netdev_name}, + {SSS_TOOL_CLEAR_FUNC_STATS, sss_tool_clear_func_stats}, + {SSS_TOOL_SET_LINK_MODE, sss_tool_set_link_mode}, + {SSS_TOOL_DCB_STATE, sss_tool_dcb_mt_dcb_state}, + {SSS_TOOL_QOS_DEV, sss_tool_dcb_mt_qos_map}, + {SSS_TOOL_GET_QOS_COS, sss_tool_dcb_mt_hw_qos_get}, +}; + +static int sss_tool_cmd_to_nic_driver(struct sss_nic_dev *nic_dev, + u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int idx; + int cmd_num = ARRAY_LEN(sss_tool_nic_cmd_handle); + enum sss_tool_driver_cmd_type cmd_type = (enum sss_tool_driver_cmd_type)cmd; + int ret = -EINVAL; + + mutex_lock(&nic_dev->qp_mutex); + for (idx = 0; idx < cmd_num; idx++) { + if (cmd_type == sss_tool_nic_cmd_handle[idx].cmd_type) { + ret = sss_tool_nic_cmd_handle[idx].func + (nic_dev, in_buf, in_len, out_buf, out_len); + break; + } + } + mutex_unlock(&nic_dev->qp_mutex); + + if (idx == cmd_num) + tool_err("Fail to send to nic driver, cmd %d is not exist\n", cmd_type); + + return ret; +} + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (cmd == SSS_TOOL_GET_DRV_VERSION) + return sss_tool_get_nic_version(out_buf, out_len); + + if (!uld_dev) + return -EINVAL; + + return sss_tool_cmd_to_nic_driver(uld_dev, cmd, in_buf, in_len, out_buf, out_len); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h new file mode 100644 index 00000000000000..64bbd9c3a40c25 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_FUNC_H +#define SSS_TOOL_NIC_FUNC_H + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c new file mode 100644 index 00000000000000..ba2a5faf0dea89 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_tool_nic_phy_attr.h" + +enum sss_tool_link_mode { + SSS_TOOL_LINK_MODE_AUTO = 0, + SSS_TOOL_LINK_MODE_UP, + SSS_TOOL_LINK_MODE_DOWN, + SSS_TOOL_LINK_MODE_MAX, +}; + +typedef void (*sss_tool_set_link_mode_handler_t)(struct sss_nic_dev *nic_dev); + +int 
sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_loop_mode *mode = out_buf; + + if (!out_len || !mode) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*mode)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*mode)); + return -EINVAL; + } + + return sss_nic_get_loopback_mode(nic_dev, (u8 *)&mode->loop_mode, + (u8 *)&mode->loop_ctrl); +} + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + const struct sss_tool_loop_mode *mode = in_buf; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set loopback mode, netdev is down\n"); + return -EFAULT; + } + + if (!mode || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (in_len != sizeof(*mode) || *out_len != sizeof(*mode)) { + tool_err("Invalid in len %u or out len %u is not equal to %lu\n", + in_len, *out_len, sizeof(*mode)); + return -EINVAL; + } + + ret = sss_nic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); + if (ret == 0) + tool_info("Set loopback mode %u en %u successfully\n", + mode->loop_mode, mode->loop_ctrl); + + return ret; +} + +static bool sss_tool_check_param_valid(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + const u32 *out_len) +{ + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set link mode, netdev is down\n"); + return false; + } + + if (!in_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return false; + } + + if (in_len != sizeof(SSS_TOOL_LINK_MODE_MAX) || + *out_len != sizeof(SSS_TOOL_LINK_MODE_MAX)) { + tool_err("Invalid in len %u or out len %u is not equal to %lu\n", + in_len, *out_len, sizeof(SSS_TOOL_LINK_MODE_MAX)); + return false; + } + + return true; +} + +static void sss_tool_set_link_status(struct sss_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!status) { + if (!netif_carrier_ok(netdev)) + return; + + tool_info("Link down\n"); + nic_dev->link_status = status; + netif_carrier_off(netdev); + + } else { + if (netif_carrier_ok(netdev)) + return; + + tool_info("Link up\n"); + nic_dev->link_status = status; + netif_carrier_on(netdev); + } +} + +static void sss_tool_link_mode_auto(struct sss_nic_dev *nic_dev) +{ + u8 link_status; + + if (sss_nic_get_hw_link_state(nic_dev, &link_status)) + link_status = false; + + sss_tool_set_link_status(nic_dev, (bool)link_status); + tool_info("Set link mode to auto successfully, link state is %s\n", + (link_status ?
"up" : "down")); +} + +static void sss_tool_link_mode_up(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, true); + tool_info("Set link mode to up successfully\n"); +} + +static void sss_tool_link_mode_down(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, false); + tool_info("Set link mode to down successfully\n"); +} + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const enum sss_tool_link_mode *mode = in_buf; + + sss_tool_set_link_mode_handler_t handler[] = { + sss_tool_link_mode_auto, + sss_tool_link_mode_up, + sss_tool_link_mode_down, + }; + + if (!sss_tool_check_param_valid(nic_dev, in_buf, in_len, out_len)) + return -EFAULT; + + if (*mode >= SSS_TOOL_LINK_MODE_MAX) { + tool_err("Fail to set link mode, mode %d\n", *mode); + return -EINVAL; + } + + handler[*mode](nic_dev); + + return 0; +} + +static int sss_tool_update_pf_bw_limit(struct sss_nic_dev *nic_dev, u32 bw_limit) +{ + int ret; + u32 old_bw_limit; + struct sss_nic_port_info port_info = {0}; + struct sss_nic_io *nic_io = nic_dev->nic_io; + + if (!nic_io) + return -EINVAL; + + if (bw_limit > SSSNIC_PF_LIMIT_BW_MAX) { + tool_err("Fail to update pf bw limit, bandwidth: %u larger than max limit: %u\n", + bw_limit, SSSNIC_PF_LIMIT_BW_MAX); + return -EINVAL; + } + + old_bw_limit = nic_io->mag_cfg.pf_bw_limit; + nic_io->mag_cfg.pf_bw_limit = bw_limit; + + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + tool_err("Fail to get port info\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; + return -EIO; + } + + ret = sss_nic_set_pf_rate(nic_dev, port_info.speed); + if (ret != 0) { + tool_err("Fail to set pf bandwidth\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; + return ret; + } + + return 0; +} + +static int sss_tool_check_preconditions(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len) +{ + int ret; + u8 link_state = 0; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len %u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(link_state)) { + tool_err("Invalid out len %u is not equal to %lu\n", + *out_len, sizeof(link_state)); + return -EINVAL; + } + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret != 0) { + tool_err("Fail to get link state\n"); + return -EIO; + } + + if (!link_state) { + tool_err("Fail to set pf rate, must be link up\n"); + return -EINVAL; + } + + return 0; +} + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u32 pf_bw_limit; + + ret = sss_tool_check_preconditions(nic_dev, in_buf, in_len, out_buf, out_len); + if (ret != 0) + return -EINVAL; + + pf_bw_limit = *((u32 *)in_buf); + + ret = sss_tool_update_pf_bw_limit(nic_dev, pf_bw_limit); + if (ret != 0) { + tool_err("Fail to set pf bandwidth limit to %u%%\n", pf_bw_limit); + if (ret < 0) + return ret; + } + + *((u8 *)out_buf) = (u8)ret; + + return 0; +} + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_io *nic_io = NULL; + + if
(SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Invalid out len %d is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EFAULT; + } + + nic_io = nic_dev->nic_io; + if (!nic_io) + return -EINVAL; + + *((u32 *)out_buf) = nic_io->mag_cfg.pf_bw_limit; + + return 0; +} + +int sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != IFNAMSIZ) { + tool_err("Invalid out len %u is not equal to %u\n\n", + *out_len, IFNAMSIZ); + return -EINVAL; + } + + strscpy(out_buf, nic_dev->netdev->name, IFNAMSIZ); + + return 0; +} + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int *tx_timeout = out_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!out_buf || !out_len) { + tool_err("Fail to get netdev tx timeout, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Fail to get netdev tx timeout, out len %u is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EINVAL; + } + + *tx_timeout = net_dev->watchdog_timeo; + + return 0; +} + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const int *tx_timeout = in_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!in_buf) { + tool_err("Invalid in buf is null\n"); + return -EFAULT; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + net_dev->watchdog_timeo = *tx_timeout * HZ; + tool_info("Success to set tx timeout check period to %ds\n", *tx_timeout); + + return 0; +} + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_mbx_get_xsfp_present *sfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*sfp_info) || *out_len != sizeof(*sfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*sfp_info)); + return -EINVAL; + } + + sfp_info->abs_status = sss_nic_if_sfp_absent(nic_dev); + sfp_info->head.state = 0; + + return 0; +} + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + struct sss_nic_mbx_get_xsfp_info *xsfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*xsfp_info) || *out_len != sizeof(*xsfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*xsfp_info)); + return -EINVAL; + } + + ret = sss_nic_get_sfp_info(nic_dev, xsfp_info); + if (ret != 0) + xsfp_info->head.state = SSS_TOOL_EIO; + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h new file mode 100644 index 00000000000000..cbf4fbdce4f7e6 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_PHY_ATTR_H +#define SSS_TOOL_NIC_PHY_ATTR_H + +int sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c new file mode 100644 index 00000000000000..1edd4837c35775 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_nic_tx.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_tool_nic_qp_info.h" + +static int sss_tool_get_wqe_info(struct sss_nic_dev *nic_dev, + u16 q_id, u16 wqe_id, u16 wqebb_cnt, + u8 *out_buff, const u16 *out_len, + enum sss_nic_queue_type q_type) +{ + u32 i; + void *src_wqebb = NULL; + u32 offset; + struct sss_nic_io_queue *queue = NULL; + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= nic_io->max_qp_num) { + tool_err("Fail to get wqe info, q_id[%u] > num_qps_cfg[%u].\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + if (q_type == SSSNIC_RQ) + queue = &nic_io->rq_group[q_id]; + else + queue = &nic_io->sq_group[q_id]; + + if ((wqe_id + wqebb_cnt) > queue->wq.q_depth) { + tool_err("Fail to get wqe info, (idx[%u] + idx[%u]) > q_depth[%u].\n", + wqe_id, wqebb_cnt, queue->wq.q_depth); + return -EINVAL; + } + + if (*out_len != (queue->wq.elem_size * wqebb_cnt)) { + tool_err("Fail to get wqe info, out len :%u is not equal to %d\n", + *out_len, (queue->wq.elem_size * wqebb_cnt)); + return -EINVAL; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqebb = sss_wq_wqebb_addr(&queue->wq, + (u16)SSS_WQ_MASK_ID(&queue->wq, wqe_id + i)); + offset = queue->wq.elem_size * i; + memcpy(out_buff + offset, src_wqebb, queue->wq.elem_size); + } + + return 0; +} + +static void sss_tool_get_sq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_sq_info *sq_info) +{ + struct 
sss_nic_io_queue *sq = NULL; + + sq = &nic_io->sq_group[q_id]; + + sq_info->q_depth = sq->wq.q_depth; + sq_info->q_id = q_id; + sq_info->pi = sss_nic_get_sq_local_pi(sq); + sq_info->doorbell.map_addr = (u64 *)sq->db_addr; + sq_info->fi = sss_nic_get_sq_hw_ci(sq); + sq_info->wqebb_size = sq->wq.elem_size; + sq_info->ci = sss_nic_get_sq_local_ci(sq); + sq_info->ci_addr = sq->tx.ci_addr; + sq_info->slq_handle = sq; + sq_info->cla_addr = sq->wq.block_paddr; +} + +static void sss_tool_get_rq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_rq_info *rq_info) +{ + struct sss_nic_io_queue *rq = NULL; + + rq = &nic_io->rq_group[q_id]; + + rq_info->msix_idx = rq->msix_id; + rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_vaddr); + rq_info->buf_len = nic_io->rx_buff_len; + rq_info->wqebb_size = rq->wq.elem_size; + rq_info->slq_handle = rq; + rq_info->q_id = q_id; + rq_info->ci_cla_tbl_addr = rq->wq.block_paddr; + rq_info->q_depth = (u16)rq->wq.q_depth; + rq_info->ci_wqe_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); +} + +static int sss_tool_get_queue_info(struct sss_nic_dev *nic_dev, u16 q_id, + void *out_buff, enum sss_nic_queue_type q_type) +{ + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= nic_io->max_qp_num) { + tool_err("Fail to get rq info, input q_id(%u) is larger than max qp num:%u\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + (q_type == SSSNIC_RQ) ? sss_tool_get_rq_info(nic_io, q_id, out_buff) : + sss_tool_get_sq_info(nic_io, q_id, out_buff); + + return 0; +} + +static bool sss_tool_check_input_pointer(struct sss_nic_dev *nic_dev, + const void *in_buf, void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Invalid input param nic_dev\n"); + return false; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid input param,in_buf/out_buf/out_len\n"); + return false; + } + + return true; +} + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 q_id; + struct sss_tool_sq_info sq_info = {0}; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(in_len)) { + tool_err("Fail to get tx info, in len :%u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(sq_info)) { + tool_err("Fail to get tx info, out len :%u is not equal to %lu\n", + *out_len, sizeof(sq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + return sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_SQ); +} + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(*info)) { + tool_err("Fail to get tx wqe info, in len %u is not equal to %lu\n", + in_len, sizeof(*info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_SQ); +} + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u16 q_id; + struct sss_tool_rq_info *rq_info = out_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return 
-EINVAL; + + if (in_len != sizeof(u32)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(u32)); + return -EINVAL; + } + + if (*out_len != sizeof(*rq_info)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*rq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + ret = sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_RQ); + if (ret != 0) { + tool_err("Fail to get rq info, ret: %d.\n", ret); + return ret; + } + + rq_info->pending_limt = nic_dev->rq_desc_group[q_id].last_pending_limt; + rq_info->msix_vector = nic_dev->rq_desc_group[q_id].irq_id; + rq_info->delta = (u16)nic_dev->rq_desc_group[q_id].delta; + rq_info->sw_pi = nic_dev->rq_desc_group[q_id].pi; + rq_info->coalesc_timer_cfg = nic_dev->rq_desc_group[q_id].last_coal_timer; + rq_info->ci = (u16)(nic_dev->rq_desc_group[q_id].ci & + nic_dev->rq_desc_group[q_id].qid_mask); + + return 0; +} + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx wqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_RQ); +} + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqe_id = 0; + u16 q_id = 0; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx cqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + if (*out_len != sizeof(struct sss_nic_cqe)) { + tool_err("Fail to get rx cqe info, out len: %u is not equal to %lu\n", + *out_len, sizeof(struct sss_nic_cqe)); + return -EINVAL; + } + + wqe_id = (u16)info->wqe_id; + q_id = (u16)info->q_id; + + if (q_id >= nic_dev->qp_res.qp_num || wqe_id >= nic_dev->rq_desc_group[q_id].q_depth) { + tool_err("Fail to get rx cqe info, q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", + q_id, nic_dev->qp_res.qp_num, wqe_id, + nic_dev->rq_desc_group[q_id].q_depth); + return -EFAULT; + } + + memcpy(out_buf, nic_dev->rq_desc_group[q_id].rx_desc_group[wqe_id].cqe, + sizeof(struct sss_nic_cqe)); + + return 0; +} + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Fail to get queue number, netdev is down\n"); + return -EFAULT; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(nic_dev->qp_res.qp_num)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(nic_dev->qp_res.qp_num)); + return -EINVAL; + } + + *((u16 *)out_buf) = nic_dev->qp_res.qp_num; + + return 0; +} + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 intr_num = sss_nic_intr_num(nic_dev->hwdev); + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if 
(*out_len != sizeof(intr_num)) { + tool_err("Invalid out len:%u is not equal to %lu\n", + *out_len, sizeof(intr_num)); + return -EFAULT; + } + + *(u16 *)out_buf = intr_num; + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h new file mode 100644 index 00000000000000..c7b674751ecd78 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_QP_INFO_H +#define SSS_TOOL_NIC_QP_INFO_H + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c new file mode 100644 index 00000000000000..b1bb47e7037054 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ethtool_stats_api.h" +#include "sss_tool_nic_stats.h" + +enum sss_tool_show_set { + SSS_TOOL_SHOW_SSET_IO_STATS = 1, +}; + +static void sss_tool_reset_nicdev_stats(struct sss_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->tx_stats.stats_sync); + nic_dev->tx_stats.rsvd1 = 0; + nic_dev->tx_stats.rsvd2 = 0; + nic_dev->tx_stats.tx_drop = 0; + nic_dev->tx_stats.tx_timeout = 0; + nic_dev->tx_stats.tx_invalid_qid = 0; + u64_stats_update_end(&nic_dev->tx_stats.stats_sync); +} + +static void sss_tool_reset_rq_stats(struct sss_nic_rq_stats *rq_stats) +{ + u64_stats_update_begin(&rq_stats->stats_sync); + rq_stats->reset_drop_sge = 0; + rq_stats->rx_packets = 0; + rq_stats->alloc_rx_dma_err = 0; + rq_stats->rx_bytes = 0; + + rq_stats->csum_errors = 0; + rq_stats->rx_dropped = 0; + rq_stats->errors = 0; + rq_stats->large_xdp_pkts = 0; + rq_stats->rx_buf_errors = 0; + rq_stats->alloc_skb_err = 0; + rq_stats->xdp_dropped = 0; + rq_stats->other_errors = 0; + rq_stats->rsvd2 = 0; + u64_stats_update_end(&rq_stats->stats_sync); +} + +static void sss_tool_reset_sq_stats(struct sss_nic_sq_stats *sq_stats) +{ + u64_stats_update_begin(&sq_stats->stats_sync); + sq_stats->unknown_tunnel_proto = 0; + sq_stats->tx_packets = 0; + sq_stats->tx_dropped = 0; + sq_stats->frag_len_overflow = 0; + sq_stats->tx_busy = 0; + sq_stats->wake = 0; + sq_stats->skb_pad_err = 0; + sq_stats->dma_map_err = 0; + sq_stats->frag_size_zero = 0; + 
sq_stats->tx_bytes = 0; + sq_stats->offload_err = 0; + sq_stats->rsvd1 = 0; + sq_stats->rsvd2 = 0; + u64_stats_update_end(&sq_stats->stats_sync); +} + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int i; + + if (!out_len) { + tool_err("Invalid out len is null\n"); + return -EINVAL; + } + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats)); +#endif + sss_tool_reset_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qp_num; i++) { + sss_tool_reset_rq_stats(&nic_dev->rq_desc_group[i].stats); + sss_tool_reset_sq_stats(&nic_dev->sq_desc_group[i].stats); + } + + *out_len = 0; + + return 0; +} + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u32 count = 0; + + if (!in_buf || in_len != sizeof(count) || !out_len || + *out_len != sizeof(count) || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) == SSS_TOOL_SHOW_SSET_IO_STATS) + count = sss_nic_get_io_stats_size(nic_dev); + + *((u32 *)out_buf) = count; + + return 0; +} + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_show_item *items = out_buf; + u32 count; + + if (!in_buf || in_len != sizeof(count) || !out_len || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) != SSS_TOOL_SHOW_SSET_IO_STATS) { + tool_err("Invalid input para %u stats\n", *((u32 *)in_buf)); + return -EINVAL; + } + + count = sss_nic_get_io_stats_size(nic_dev); + + if (count * sizeof(*items) != *out_len) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, count * sizeof(*items)); + return -EINVAL; + } + + sss_nic_get_io_stats(nic_dev, items); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h new file mode 100644 index 00000000000000..1c37214deeea3e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_STATS_H +#define SSS_TOOL_NIC_STATS_H + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 828dc10e4067ff..1df7b7b19039b5 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -16,6 +16,7 @@ config MDIO tristate source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/3snic/Kconfig" source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" @@ -203,5 +204,7 @@ source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/phytium/Kconfig" +source "drivers/net/ethernet/guangruntong/Kconfig" +source "drivers/net/ethernet/bzwx/Kconfig" endif # ETHERNET diff --git 
a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index d76fd52628a039..44008ee5c2eb0b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_3SNIC) += 3snic/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ @@ -108,3 +109,5 @@ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ obj-$(CONFIG_OA_TC6) += oa_tc6.o obj-$(CONFIG_NET_VENDOR_PHYTIUM) += phytium/ +obj-$(CONFIG_NET_VENDOR_GRT) += guangruntong/ +obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ diff --git a/drivers/net/ethernet/bzwx/Kconfig b/drivers/net/ethernet/bzwx/Kconfig new file mode 100644 index 00000000000000..5cc757ceba64ef --- /dev/null +++ b/drivers/net/ethernet/bzwx/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration +# + +config NET_VENDOR_BZWX + bool "BeiZhongWangXin devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about BeiZhongWangXin devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_BZWX + +source "drivers/net/ethernet/bzwx/nce/Kconfig" + +endif # NET_VENDOR_BZWX diff --git a/drivers/net/ethernet/bzwx/Makefile b/drivers/net/ethernet/bzwx/Makefile new file mode 100644 index 00000000000000..05273f2858c508 --- /dev/null +++ b/drivers/net/ethernet/bzwx/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. +# + +obj-$(CONFIG_NCE) += nce/ diff --git a/drivers/net/ethernet/bzwx/nce/Kconfig b/drivers/net/ethernet/bzwx/nce/Kconfig new file mode 100644 index 00000000000000..87177c4e7ba8e4 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Kconfig @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration + + +config NCE + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + depends on PCI + help + This selects the drivers support BeiZhongWangXin Ethernet Connection N5/N6 Series devices. + +if NCE + +config NE6X + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + depends on (PCI_MSI && NETDEVICES && ETHERNET && INET && (X86_64 || ARM64)) + select NET_DEVLINK + help + This driver supports BeiZhongWangXin Ethernet Connection N5/N6 Series + of devices. + + To compile this driver as a module, choose M here. + The module will be called ncepf. + +config NE6XVF + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function support" + depends on NE6X + help + This driver supports virtual functions for BeiZhongWangXin Ethernet Connection N5/N6 Series + Virtual Function devices. + + To compile this driver as a module, choose M here. The module + will be called ncevf. + +endif #NCE diff --git a/drivers/net/ethernet/bzwx/nce/Makefile b/drivers/net/ethernet/bzwx/nce/Makefile new file mode 100644 index 00000000000000..9bb849c0054bac --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. 
+# + +subdir-ccflags-y += -I$(src)/comm +subdir-ccflags-y += -I$(src)/ne6x +subdir-ccflags-y += -I$(src)/ne6x_vf +ccflags-y += -I$(srctree)/$(src) +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/comm +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x_vf + +obj-$(CONFIG_NE6X) += ncepf.o +ncepf-objs := comm/txrx.o \ + ne6x/ne6x_main.o \ + ne6x/ne6x_ethtool.o \ + ne6x/ne6x_procfs.o \ + ne6x/ne6x_netlink.o \ + ne6x/ne6x_interrupt.o \ + ne6x/ne6x_reg.o \ + ne6x/ne6x_dev.o \ + ne6x/ne6x_txrx.o \ + ne6x/ne6x_virtchnl_pf.o \ + ne6x/ne6x_arfs.o +ncepf-$(CONFIG_DEBUG_FS) += ne6x/ne6x_debugfs.o + +obj-$(CONFIG_NE6XVF) += ncevf.o +ncevf-objs := comm/txrx.o \ + ne6x_vf/ne6xvf_main.o \ + ne6x_vf/ne6xvf_ethtool.o \ + ne6x_vf/ne6xvf_virtchnl.o \ + ne6x_vf/ne6xvf_txrx.o +ncevf-$(CONFIG_DEBUG_FS) += ne6x_vf/ne6xvf_debugfs.o diff --git a/drivers/net/ethernet/bzwx/nce/comm/common.h b/drivers/net/ethernet/bzwx/nce/comm/common.h new file mode 100644 index 00000000000000..9bf8bf042c7531 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/common.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMMON_H +#define _NE6X_COMMON_H + +#define NE6X_MAX_U64 0xFFFFFFFFFFFFFFFFULL + +#define NE6X_MODULE_TYPE_TOTAL_BYTE 3 + +#define NE6X_AQ_LINK_UP 0x1ULL +#define NE6X_AQ_AN_COMPLETED BIT(0) + +#define PCI_VENDOR_ID_BZWX 0xD20C + +struct ne6x_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_miss; + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ + u64 rx_malform; + u64 tx_malform; +}; + +enum ne6x_phy_type { + NE6X_PHY_TYPE_UNKNOWN = 0, + NE6X_PHY_TYPE_10GBASE = 1, + NE6X_PHY_TYPE_25GBASE, + NE6X_PHY_TYPE_40GBASE, + NE6X_PHY_TYPE_100GBASE, + NE6X_PHY_TYPE_200GBASE, +}; + +#define NE6X_LINK_SPEED_10GB_SHIFT 0x1 +#define NE6X_LINK_SPEED_40GB_SHIFT 0x2 +#define NE6X_LINK_SPEED_25GB_SHIFT 0x3 +#define NE6X_LINK_SPEED_100GB_SHIFT 0x4 +#define NE6X_LINK_SPEED_200GB_SHIFT 0x5 + +enum ne6x_sdk_link_speed { + NE6X_LINK_SPEED_UNKNOWN = 0, + NE6X_LINK_SPEED_10GB = BIT(NE6X_LINK_SPEED_10GB_SHIFT), + NE6X_LINK_SPEED_40GB = BIT(NE6X_LINK_SPEED_40GB_SHIFT), + NE6X_LINK_SPEED_25GB = BIT(NE6X_LINK_SPEED_25GB_SHIFT), + NE6X_LINK_SPEED_100GB = BIT(NE6X_LINK_SPEED_100GB_SHIFT), + NE6X_LINK_SPEED_200GB = BIT(NE6X_LINK_SPEED_200GB_SHIFT), +}; + +struct ne6x_link_status { + u64 phy_type_low; + u64 phy_type_high; + + u16 max_frame_size; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + u32 link_speed; + u8 module_type[NE6X_MODULE_TYPE_TOTAL_BYTE]; +}; + +struct ne6x_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct ne6x_link_info { + u32 link; + u32 speed; +}; + +enum ne6x_media_type { + NE6X_MEDIA_UNKNOWN = 0, + NE6X_MEDIA_FIBER, + NE6X_MEDIA_BASET, + NE6X_MEDIA_BACKPLANE, + NE6X_MEDIA_DA, + NE6X_MEDIA_AUI, +}; + +struct ne6x_phy_info { + struct ne6x_link_status link_info; + struct ne6x_link_status link_info_old; + u64 phy_type_low; + u64 phy_type_high; + enum ne6x_media_type media_type; + u8 get_link_info; + u16 curr_user_speed_req; +}; + 
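+/* Descriptive note (not in the original patch): per-port state, holding the + * back pointer to the owning HW instance, the logical and hardware port ids, + * the hardware queue range assigned to the port, and the cached link, MAC + * and PHY information. + */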
+struct ne6x_port_info { + struct ne6x_hw *hw; /* back pointer to HW instance */ + + u8 lport; + u8 hw_port_id; /* hardware port id */ + u8 hw_trunk_id; + u32 hw_queue_base_old; + u32 hw_queue_base; + u32 hw_max_queue; + + u32 queue; /* current used queue */ + struct ne6x_link_info link_status; + struct ne6x_mac_info mac; + struct ne6x_phy_info phy; +}; + +struct ne6x_bus_info { + u16 domain_num; + u16 device; + u8 func; + u8 bus_num; +}; + +struct ne6x_mbx_snap_buffer_data { + u8 state : 4; + u8 len : 4; + u8 type; + u8 data[6]; +}; + +/* Structure to track messages sent by VFs on mailbox: + * 1. vf_cntr : a counter array of VFs to track the number of + * asynchronous messages sent by each VF + * 2. vfcntr_len : number of entries in VF counter array + */ +struct ne6x_mbx_vf_counter { + u32 *vf_cntr; + u32 vfcntr_len; +}; + +/* Enum defining the different states of the mailbox snapshot in the + * PF-VF mailbox overflow detection algorithm. The + * snapshot can be in + * states: + * 1. NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot + * within + * the mailbox buffer. + * 2. NE6X_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snaphot + * 3. + * NE6X_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the + * mailbox and mark any VFs sending more + * messages than the threshold limit set. + * 4. NE6X_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to + * 0xFFFFFFFF. + */ +enum ne6x_mbx_snapshot_state { + NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, + NE6X_MAL_VF_DETECT_STATE_TRAVERSE, + NE6X_MAL_VF_DETECT_STATE_DETECT, + NE6X_MAL_VF_DETECT_STATE_INVALID = 0xF, +}; + +struct ne6x_mbx_snapshot { + enum ne6x_mbx_snapshot_state state; + struct ne6x_mbx_vf_counter mbx_vf; +}; + +enum virtchnl_vf_config_codes { + VIRTCHNL_VF_CONFIG_TRUST = 0, + VIRTCHNL_VF_CONFIG_FORCE_LINK = 1, +}; + +struct virtchnl_vf_config { + u8 type; + u8 data[5]; +}; + +enum ne6x_adapter_state { + NE6X_ADPT_DOWN, + NE6X_ADPT_NEEDS_RESTART, + NE6X_ADPT_NETDEV_ALLOCD, + NE6X_ADPT_NETDEV_REGISTERED, + NE6X_ADPT_UMAC_FLTR_CHANGED, + NE6X_ADPT_MMAC_FLTR_CHANGED, + NE6X_ADPT_VLAN_FLTR_CHANGED, + NE6X_ADPT_PROMISC_CHANGED, + NE6X_ADPT_RELEASING, + NE6X_ADPT_RECOVER, + NE6X_ADPT_DOWN_REQUESTED, + NE6X_ADPT_OPEN, + NE6X_ADPT_NBITS /* must be last */ +}; + +struct ne6x_adapt_comm { + u16 port_info; + DECLARE_BITMAP(state, NE6X_ADPT_NBITS); +}; + +struct ne6x_vlan { + u16 tpid; + u16 vid; + u8 prio; +}; + +struct ne6x_vf_vlan { + u16 vid; + u16 tpid; +}; + +struct ne6x_macvlan { + struct list_head list; + struct net_device *vdev; + u8 mac[ETH_ALEN]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_RSS_HASH_TYPE_NONE = 0x0, + NE6X_RSS_HASH_TYPE_IPV4 = 0x01, + NE6X_RSS_HASH_TYPE_IPV4_TCP = 0x02, + NE6X_RSS_HASH_TYPE_IPV6 = 0x04, + NE6X_RSS_HASH_TYPE_IPV6_TCP = 0x08, + NE6X_RSS_HASH_TYPE_IPV4_UDP = 0x10, + NE6X_RSS_HASH_TYPE_IPV6_UDP = 0x20, +}; + +enum { + NE6X_RSS_HASH_FUNC_NONE = 0x0, + NE6X_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define NE6X_RSS_MAX_KEY_SIZE 40 +#define NE6X_RSS_MAX_IND_TABLE_SIZE 128 + +struct ne6x_rss_info { + u16 hash_type; + u16 hash_func; + u16 hash_key_size; + u16 ind_table_size; + u8 hash_key[NE6X_RSS_MAX_KEY_SIZE]; + u8 ind_table[NE6X_RSS_MAX_IND_TABLE_SIZE]; +}; + +#define NE6X_VF_VLAN(vid, tpid) ((struct ne6x_vf_vlan){vid, tpid}) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/feature.h b/drivers/net/ethernet/bzwx/nce/comm/feature.h new file mode 100644 index 00000000000000..482b4d2d1d3993 --- /dev/null +++ 
b/drivers/net/ethernet/bzwx/nce/comm/feature.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_FEATURE_H +#define _NE6X_FEATURE_H + +#define NE6X_F_RSS BIT(0) +#define NE6X_F_PROMISC BIT(1) +#define NE6X_F_RX_IPV4_CKSUM BIT(2) +#define NE6X_F_RX_UDP_CKSUM BIT(3) +#define NE6X_F_RX_TCP_CKSUM BIT(4) +#define NE6X_F_RX_SCTP_CKSUM BIT(5) +#define NE6X_F_RX_VLAN_STRIP BIT(6) +#define NE6X_F_RX_QINQ_STRIP BIT(7) +#define NE6X_F_RX_VLAN_FILTER BIT(8) +#define NE6X_F_LRO BIT(9) +#define NE6X_F_RX_DISABLE BIT(10) +#define NE6X_F_RX_FW_LLDP BIT(11) +#define NE6X_F_RX_ALLMULTI BIT(12) +#define NE6X_F_FLOW_STEERING BIT(15) +#define NE6X_F_TX_VLAN BIT(16) +#define NE6X_F_TX_IP_CKSUM BIT(17) +#define NE6X_F_TX_TCP_CKSUM BIT(18) +#define NE6X_F_TX_UDP_CKSUM BIT(19) +#define NE6X_F_TX_SCTP_CKSUM BIT(20) +#define NE6X_F_TX_TCP_SEG BIT(21) +#define NE6X_F_TX_UDP_SEG BIT(22) +#define NE6X_F_TX_QINQ BIT(23) +#define NE6X_F_TX_NIC_SWITCH BIT(24) +#define NE6X_F_TX_MAC_LEARN BIT(25) +#define NE6X_F_TX_DISABLE BIT(26) +#define NE6X_F_TX_QOSBANDWIDTH BIT(27) +#define NE6X_F_TX_UDP_TNL_SEG BIT(28) +#define NE6X_F_TX_UDP_TNL_CSUM BIT(29) + +#define NE6X_OFFLOAD_RSS NE6X_F_RSS +#define NE6X_OFFLOAD_RXCSUM (NE6X_F_RX_IPV4_CKSUM | \ + NE6X_F_RX_UDP_CKSUM | \ + NE6X_F_RX_TCP_CKSUM | \ + NE6X_F_RX_SCTP_CKSUM) +#define NE6X_OFFLOAD_TXCSUM (NE6X_F_TX_IP_CKSUM | \ + NE6X_F_TX_TCP_CKSUM | \ + NE6X_F_TX_UDP_CKSUM | \ + NE6X_F_TX_UDP_TNL_CSUM) + +#define NE6X_OFFLOAD_LRO NE6X_F_LRO +#define NE6X_OFFLOAD_TSO NE6X_F_TX_TCP_SEG +#define NE6X_OFFLOAD_UFO NE6X_F_TX_UDP_SEG +#define NE6X_OFFLOAD_SCTP_CSUM NE6X_F_TX_SCTP_CKSUM + +#define NE6X_OFFLOAD_RXD_VLAN (NE6X_F_RX_VLAN_STRIP | \ + NE6X_F_RX_QINQ_STRIP | \ + NE6X_F_RX_VLAN_FILTER) +#define NE6X_OFFLOAD_TXD_VLAN (NE6X_F_TX_VLAN | NE6X_F_TX_QINQ) +#define NE6X_OFFLOAD_L2 NE6X_F_TX_NIC_SWITCH + +#define NE6X_F_SMART_ENABLED BIT(0) +#define NE6X_F_SRIOV_ENABLED BIT(1) +#define NE6X_F_SWITCH_ENABLED BIT(2) +#define NE6X_F_L2FDB_LEARN_ENABLED BIT(3) +#define NE6X_F_VLAN_ENABLED BIT(4) +#define NE6X_F_WHITELIST_ENABLED BIT(5) +#define NE6X_F_DDOS_ENABLED BIT(6) +#define NE6X_F_TRUST_VLAN_ENABLED BIT(7) +#define NE6X_F_S_ROCE_ICRC_ENABLED BIT(8) + +#define NE6X_F_ACK_FLOOD BIT(0) +#define NE6X_F_PUSH_ACK_FLOOD BIT(1) +#define NE6X_F_SYN_ACK_FLOOD BIT(2) +#define NE6X_F_FIN_FLOOD BIT(3) +#define NE6X_F_RST_FLOOD BIT(4) +#define NE6X_F_PUSH_SYN_ACK_FLOOD BIT(5) +#define NE6X_F_UDP_FLOOD BIT(6) +#define NE6X_F_ICMP_FLOOD BIT(7) +#define NE6X_F_FRAGMENT_FLOOD BIT(8) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/mailbox.h b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h new file mode 100644 index 00000000000000..85ae76b1321fe6 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_COMM_MAILBOX_H +#define _NE6X_COMM_MAILBOX_H + +enum virtchnl_ops { + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_ADPT_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + /* promiscuous mode / unicast promisc / multicast promisc */ + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_EVENT = 17, /* link state */ + VIRTCHNL_OP_SET_VF_ADDR = 18, + VIRTCHNL_OP_VF_CONFIG = 19, + VIRTCHNL_OP_CONFIG_OFFLOAD = 27, + VIRTCHNL_OP_GET_VF_FEATURE = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_CONFIG_RSS = 30, + VIRTCHNL_OP_GET_PORT_STATUS = 31, + VIRTCHNL_OP_CHANGED_RSS = 32, + VIRTCHNL_OP_SET_VF_STATE = 33, + VIRTCHNL_OP_SET_FAST_MDOE = 34, + VIRTCHNL_OP_CONFIG_VLAN = 40, + VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD = 41, + VIRTCHNL_OP_CONFIG_MTU = 42, + VIRTCHNL_OP_CONFIG_FLOW_CTRL = 43, + + VIRTCHNL_OP_MAX, +}; + +static char local_error_buffer[64]; +static inline const char *ne6x_opcode_str(enum virtchnl_ops opcode) +{ + sprintf(local_error_buffer, "__OPCODE_UNKNOWN_OPCODE(%d)", opcode); + switch (opcode) { + case VIRTCHNL_OP_VERSION: + return "__OPCODE_GET_VERSION"; + case VIRTCHNL_OP_RESET_VF: + return "__OPCODE_RESET_VF"; + case VIRTCHNL_OP_GET_VF_RESOURCES: + return "__OPCODE_GET_VF_RESOURCES"; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + return "__OPCODE_CONFIG_TX_QUEUE"; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + return "__OPCODE_INIT_EXTENDED_CAPS"; + case VIRTCHNL_OP_CONFIG_ADPT_QUEUES: + return "__OPCODE_CONFIG_ADPT_QUEUES"; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + return "__OPCODE_CONFIG_IRQ_MAP"; + case VIRTCHNL_OP_ENABLE_QUEUES: + return "__OPCODE_ENABLE_QUEUES"; + case VIRTCHNL_OP_DISABLE_QUEUES: + return "__OPCODE_DISABLE_QUEUES"; + case VIRTCHNL_OP_ADD_ETH_ADDR: + return "__OPCODE_ADD_ETH_ADDR"; + case VIRTCHNL_OP_DEL_ETH_ADDR: + return "__OPCODE_DEL_ETH_ADDR"; + case VIRTCHNL_OP_ADD_VLAN: + return "__OPCODE_ADD_VLAN"; + case VIRTCHNL_OP_DEL_VLAN: + return "__OPCODE_DEL_VLAN"; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + return "__OPCODE_CONFIG_PROMISCUOUS_MODE"; + case VIRTCHNL_OP_EVENT: + return "__OPCODE_EVENT"; + case VIRTCHNL_OP_CONFIG_RSS: + return "__OPCODE_CONFIG_RSS"; + case VIRTCHNL_OP_CHANGED_RSS: + return "__OP_CHANGED_RSS"; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + return "__OPCODE_CONFIGURE_OFFLOAD"; + case VIRTCHNL_OP_GET_VF_FEATURE: + return "VIRTCHNL_OP_GET_VF_FEATURE"; + case VIRTCHNL_OP_REQUEST_QUEUES: + return "__OPCODE_REQUEST_QUEUES"; + case VIRTCHNL_OP_GET_PORT_STATUS: + return "__OP_GET_PORT_STATUS"; + case VIRTCHNL_OP_SET_VF_ADDR: + return "__OPCODE_SET_VF_ADDR"; + case VIRTCHNL_OP_VF_CONFIG: + return "__VIRTCHNL_OP_VF_CONFIG"; + case VIRTCHNL_OP_SET_VF_STATE: + return "__VIRTCHNL_OP_SET_VF_STATE"; + case VIRTCHNL_OP_SET_FAST_MDOE: + return "__VIRTCHNL_OP_SET_FAST_MDOE"; + case VIRTCHNL_OP_CONFIG_VLAN: + return "__VIRTCHNL_OP_CONFIG_VLAN"; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + return "__VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD"; + case VIRTCHNL_OP_CONFIG_MTU: + return "__VIRTCHNL_OP_CONFIG_MTU"; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + return "__VIRTCHNL_OP_CONFIG_FLOW_CTRL"; + default: + return local_error_buffer; + } +} + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + 
VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +static inline const char *ne6x_mbox_status_str(enum virtchnl_status_code opcode) +{ + switch (opcode) { + case VIRTCHNL_STATUS_SUCCESS: + return "__STATUS_SUCCESS"; + case VIRTCHNL_STATUS_ERR_PARAM: + return "__STATUS_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "__STATUS_ERR_NO_MEMORY"; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + return "__STATUS_ERR_OPCODE_MISMATCH"; + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + return "__STATUS_ERR_CQP_COMPL_ERROR"; + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return "__STATUS_ERR_INVALID_VF_ID"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "__STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "__STATUS_ERR_NOT_SUPPORTED"; + default: + return "__STATUS_UNKNOWN"; + } +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/reg.h b/drivers/net/ethernet/bzwx/nce/comm/reg.h new file mode 100644 index 00000000000000..15a745bb06f3b0 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/reg.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMM_REG_H +#define _NE6X_COMM_REG_H + +#include + +#define NE6X_BAR2_VP_TDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (0 << 11) | (((__reg) & 0xff) << 3)) +#define NE6X_BAR2_VP_RDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (1 << 11) | (((__reg) & 0xff) << 3)) + +/* CIU */ +#define NE6X_VP_BASE_ADDR 0x0 +#define NE6X_VPINT_DYN_CTLN(_VPID, _OFFSET) \ + (((_VPID) << 12) + ((_OFFSET) << 4)) /* _i=0...64 * Reset: PFR */ +#define NE6X_PF_BASE_ADDR 0x138ULL +#define NE6X_PFINT_DYN_CTLN(_PFID, _OFFSET) \ + (((NE6X_PF_BASE_ADDR + (_PFID)) << 12) + ((_OFFSET) << 4)) + /* _i=0...7 */ /* Reset: PFR */ + +#define NE6X_VP_INT 0x00 +#define NE6X_VP_INT_SET 0x01 +#define NE6X_VP_INT_MASK 0x02 +#define NE6X_VP_CQ_INTSHIFT 16 +#define NE6X_CQ_BASE_ADDR 0x03 +#define NE6X_CQ_HD_POINTER 0x04 +#define NE6X_CQ_CFG 0x05 +#define NE6X_RQ_BASE_ADDR 0x07 +#define NE6X_RQ_CFG 0x08 +#define NE6X_RQ_TAIL_POINTER 0x09 +#define NE6X_VP_RELOAD 0x0a +#define NE6X_SQ_BASE_ADDR 0x0b +#define NE6X_SQ_CFG 0x0c +#define NE6X_SQ_TAIL_POINTER 0x0d +#define NE6X_CQ_TAIL_POINTER 0x11 +#define NE6X_RQ_BUFF_OFST 0x12 +#define NE6X_RQ_HD_POINTER 0x13 +#define NE6X_SQ_BUFF_OFST 0x14 +#define NE6X_SQ_HD_POINTER 0x15 +#define NE6X_RQ_OFST 0x16 +#define NE6X_SQ_OFST 0x17 +#define NE6X_RQ_BLOCK_CFG 0x1b +#define NE6X_SQ_METER_CFG0 0x1c +#define NE6X_SQ_METER_CFG1 0x1d +#define NE6X_SQ_METER_CFG2 0x1e +#define NE6X_SQ_METER_CFG3 0x1f +#define NE6X_INT_CFG 0x21 +#define NE6X_CIU_TIME_OUT_CFG 0x45 +#define NE6X_ALL_CQ_CFG 0x46 +#define NE6X_ALL_SQ_CFG 0x47 +#define NE6X_ALL_RQ_CFG 0x48 +#define NE6X_MERGE_CFG 0x49 +#define NE6X_BFD_RECV_CNT 0x4a +#define NE6X_ETH_RECV_CNT 0x4b + +#define NE6X_PF_CON_ADDR(_OFST) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((_OFST) << 4)) +#define NE6X_PF_MAILBOX_DATA 0x40 +#define NE6X_VF_MAILBOX_DATA 0x80 +#define NE6X_PF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_PF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_VF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_VF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_PF_DB_INT_REQ 0xC0 +#define NE6X_PF_DB_INT_ACK 0xC1 
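+/* Descriptive note (not in the original patch): PF doorbell DREQ/DACK interrupt register offsets below (status, set and mask). */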
+#define NE6X_PF_DB_DREQ_INT 0xC2 +#define NE6X_PF_DB_DREQ_INT_SET 0xC3 +#define NE6X_PF_DB_DREQ_INT_MASK 0xC4 +#define NE6X_PF_DB_DACK_INT 0xC5 +#define NE6X_PF_DB_DACK_INT_SET 0xC6 +#define NE6X_PF_DB_DACK_INT_MASK 0xC7 + +union ne6x_vp_int { + struct vp_int { + u64 csr_ciu_int_vp : 64; + } reg; + u64 val; +}; + +union ne6x_vp_int_mask { + struct vp_int_mask { + u64 csr_ciu_mask_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_base_addr { + struct cq_base_addr { + u64 csr_cq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_cfg { + struct cq_cfg { + u64 csr_cq_len_vp : 16; + u64 csr_cq_merge_time_vp : 16; + u64 csr_cq_merge_size_vp : 4; + u64 rsv0 : 28; + } reg; + u64 val; +}; + +union ne6x_rq_base_addr { + struct rq_base_addr { + u64 csr_rq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_rq_cfg { + struct rq_cfg { + u64 csr_rq_len_vp : 16; + u64 csr_rdq_pull_en : 1; + u64 csr_rqevt_write_back_vp : 1; + u64 csr_recv_pd_type_vp : 2; + u64 csr_recv_pd_revers_en : 1; + u64 rsv0 : 11; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_base_addr { + struct sq_base_addr { + u64 csr_sq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_sq_cfg { + struct sq_cfg { + u64 csr_sq_len_vp : 16; + u64 csr_tdq_pull_en : 1; + u64 csr_sqevt_write_back_vp : 1; + u64 csr_send_pd_revers_en : 1; + u64 rsv0 : 13; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_rq_block_cfg { + struct rq_block_cfg { + u64 csr_rdq_mop_len : 16; + u64 csr_rdq_sop_len : 16; + u64 rsv0 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg0 { + struct sq_meter_cfg0 { + u64 csr_meter_pkt_token_num_vp : 16; + u64 csr_meter_ipg_len_vp : 8; + u64 csr_meter_refresh_en_vp : 1; + u64 csr_meter_rate_limit_en_vp : 1; + u64 csr_meter_packet_mode_vp : 1; + u64 reserved : 37; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg1 { + struct sq_meter_cfg1 { + u64 csr_meter_refresh_count_vp : 28; + u64 reserved : 4; + u64 csr_meter_refresh_interval_vp : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg2 { + struct sq_meter_cfg2 { + u64 csr_meter_resume_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg3 { + struct sq_meter_cfg3 { + u64 csr_meter_pause_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_int_cfg { + struct int_cfg { + u64 csr_sq_hdle_half_int_cnt_vp : 16; + u64 csr_rq_hdle_half_int_cnt_vp : 16; + u64 csr_cq_hdle_half_int_cnt_vp : 16; + u64 rsv0 : 16; + } reg; + u64 val; +}; + +union ne6x_ciu_time_out_cfg { + struct ciu_time_out_cfg { + u64 csr_int_timer_out_cnt : 12; + u64 rsv0 : 52; + } reg; + u64 val; +}; + +union ne6x_all_cq_cfg { + struct all_cq_cfg { + u64 csr_allcq_merge_size : 4; + u64 rsv0 : 4; + u64 csr_allcq_wt_rr_cnt : 7; + u64 csr_allcq_wt_rr_flag : 1; + u64 rsv1 : 48; + } reg; + u64 val; +}; + +union ne6x_all_sq_cfg { + struct all_sq_cfg { + u64 csr_allsq_wb_trigger_info : 8; + u64 csr_allsq_csum_zero_negate : 1; + u64 csr_allsq_pull_merge_cfg : 5; + u64 rsv0 : 50; + } reg; + u64 val; +}; + +union ne6x_all_rq_cfg { + struct all_rq_cfg { + u64 csr_allrq_wb_trigger_info : 8; + u64 csr_allrq_pull_merge_cfg : 5; + u64 rsv0 : 51; + } reg; + u64 val; +}; + +union ne6x_merge_cfg { + struct merge_cfg { + u64 csr_merge_clk_cnt : 16; + u64 rsv0 : 48; + } reg; + u64 val; +}; + +union ne6x_eth_recv_cnt { + struct eth_recv_cnt { + u64 csr_eth_pkt_drop_cnt : 32; + u64 csr_eth_rdq_drop_cnt : 32; + } reg; + u64 val; +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.c 
b/drivers/net/ethernet/bzwx/nce/comm/txrx.c new file mode 100644 index 00000000000000..24282dfc8f945f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.c @@ -0,0 +1,1555 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "txrx.h" + +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_buf); + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + tx_ring->tx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_buf) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct ne6x_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + return -ENOMEM; +} + +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring) +{ + struct device *dev = cq_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + cq_ring->size = cq_ring->count * sizeof(struct ne6x_cq_desc); + cq_ring->size = ALIGN(cq_ring->size, 4096); + cq_ring->desc = dma_alloc_coherent(dev, cq_ring->size, &cq_ring->dma, GFP_KERNEL); + if (!cq_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + cq_ring->size); + goto err; + } + + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring) +{ + struct device *dev = tg_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + tg_ring->size = tg_ring->count * sizeof(struct ne6x_tx_tag); + tg_ring->size = ALIGN(tg_ring->size, 4096); + tg_ring->desc = dma_alloc_coherent(dev, tg_ring->size, &tg_ring->dma, GFP_KERNEL); + if (!tg_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tg_ring->size); + goto err; + } + + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int err = -ENOMEM; + int bi_size; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_buf); + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + rx_ring->rx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_buf) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ne6x_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + return err; +} + +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + if (!dev) + return -ENOMEM; + tx_ring->sgl = kzalloc(sizeof(*tx_ring->sgl), GFP_KERNEL); + + if (!tx_ring->sgl) + goto err; 
+ + return 0; +err: + return -ENOMEM; +} + +static inline unsigned int ne6x_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +bool __ne6x_chk_linearize(struct sk_buff *skb); +static inline bool ne6x_chk_linearize(struct sk_buff *skb, int count) +{ + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < NE6X_MAX_BUFFER_TXD)) + return false; + + if (skb_is_gso(skb)) + return __ne6x_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != NE6X_MAX_BUFFER_TXD; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size); + +static inline int ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + if (likely(NE6X_DESC_UNUSED(tx_ring) >= size)) + return 0; + + return __ne6x_maybe_stop_tx(tx_ring, size); +} + +static inline bool ne6x_rx_is_programming_status(u8 status) +{ + return status & 0x20; +} + +static void ne6x_reuse_rx_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct ne6x_rx_buf *new_buff; + + new_buff = &rx_ring->rx_buf[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void ne6x_clean_programming_status(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + u8 status) +{ + u32 ntc = rx_ring->next_to_clean; + struct ne6x_rx_buf *rx_buffer; + + /* fetch, update, and store next to clean */ + rx_buffer = &rx_ring->rx_buf[ntc++]; + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* place unused page back on the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static struct ne6x_rx_buf *ne6x_get_rx_buffer(struct ne6x_ring *rx_ring, const unsigned int size) +{ + struct ne6x_rx_buf *rx_buffer; + + rx_buffer = &rx_ring->rx_buf[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void ne6x_add_rx_frag(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, + size, truesize); + + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *ne6x_construct_skb(struct ne6x_ring *rx_ring, + struct ne6x_rx_buf *rx_buffer, + unsigned int size) +{ + void *page_addr = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; 
+ struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, NE6X_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > NE6X_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, page_addr, NE6X_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), page_addr, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, + truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static inline bool ne6x_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); +} + +static bool ne6x_can_reuse_rx_page(struct ne6x_rx_buf *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* Is any reuse possible? */ + if (unlikely(!ne6x_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define NE6X_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - NE6X_RXBUFFER_4096) + if (rx_buffer->page_offset > NE6X_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +static void ne6x_put_rx_buffer(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer) +{ + if (ne6x_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static inline bool ne6x_test_staterr(union ne6x_rx_desc *rx_desc, const u8 stat_err_bits) +{ + return !!(rx_desc->wb.u.val & stat_err_bits); +} + +static bool ne6x_is_non_eop(struct ne6x_ring *rx_ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define NE6X_RXD_EOF BIT(NE6X_RX_DESC_STATUS_EOF_SHIFT) + if (likely(ne6x_test_staterr(rx_desc, NE6X_RXD_EOF))) + return false; + + rx_ring->rx_stats.non_eop_descs++; + rx_desc->wb.u.val = 0; + + return true; +} + +static bool ne6x_cleanup_headers(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc) +{ + if (unlikely(ne6x_test_staterr(rx_desc, BIT(NE6X_RX_DESC_STATUS_ERR_SHIFT)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.rx_mem_error++; + return true; + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static inline void ne6x_rx_hash(struct ne6x_ring *ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_rss_hash) + skb_set_hash(skb, rx_hdr->rss_hash, PKT_HASH_TYPE_NONE); +} + +static inline void ne6x_rx_checksum(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc, + struct rx_hdr_info *rx_hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + skb_checksum_none_assert(skb); + + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_bad) { + rx_ring->rx_stats.csum_err++; + } else if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_good) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + } +} + +static inline void ne6x_process_skb_fields(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, + struct rx_hdr_info *rx_hdr) +{ + netdev_features_t features = rx_ring->netdev->features; + bool non_zero_vlan = false; + + ne6x_rx_hash(rx_ring, rx_desc, skb, rx_hdr); + rx_hdr->vlan_tci = ntohs(rx_hdr->vlan_tci); + rx_hdr->vlan_tci_outer = ntohs(rx_hdr->vlan_tci_outer); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_vlan_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_hdr->vlan_tci_outer)); + } + } + } else if (features & NETIF_F_HW_VLAN_STAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_qinq_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + (rx_hdr->vlan_tci_outer)); + } + } + } + + ne6x_rx_checksum(rx_ring, skb, rx_desc, rx_hdr); + skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ne6x_receive_skb(struct ne6x_ring *rx_ring, struct sk_buff *skb) +{ + struct ne6x_q_vector *q_vector = rx_ring->q_vector; + + napi_gro_receive(&q_vector->napi, skb); +} + +static bool ne6x_alloc_mapped_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + 
/* alloc new page for storage */ + page = dev_alloc_pages(ne6x_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, ne6x_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + NE6X_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ne6x_rx_pg_order(rx_ring)); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +void ne6x_tail_update(struct ne6x_ring *ring, int val) +{ + int i; + + for (i = 0; i < NE6X_TAIL_REG_NUM; i++) + writeq(val, ring->tail + i); +} + +static inline void ne6x_release_rx_desc(struct ne6x_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ne6x_tail_update(rx_ring, val); +} + +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union ne6x_rx_desc *rx_desc; + struct ne6x_rx_buf *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = NE6X_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_buf[ntu]; + + do { + if (!ne6x_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->wb.u.val = 0; + rx_desc->w.buffer_mop_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->w.buffer_sop_addr = 0; + rx_desc->w.mop_mem_len = rx_ring->rx_buf_len; + rx_desc->wb.pkt_len = 0; + rx_desc->w.vp = rx_ring->reg_idx; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = NE6X_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buf; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.u.val = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +static void ne6x_get_rx_head_info(struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + skb_frag_t *frag; + void *page_addr; + u32 temp_len, i; + + if (skb->data_len == 0) { + memcpy(rx_hdr, &skb->data[skb->len - 16], sizeof(struct rx_hdr_info)); + } else { + if (skb_shinfo(skb)->nr_frags > 1) { + i = skb_shinfo(skb)->nr_frags - 1; + frag = &skb_shinfo(skb)->frags[i]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } else { + frag = &skb_shinfo(skb)->frags[0]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } + } +} + +static void ne6x_clean_tx_desc(struct ne6x_tx_desc *tx_desc, struct ne6x_ring *ring) +{ + if (tx_desc->u.flags.tx_drop_addr) + ring->tx_stats.tx_drop_addr++; + + if (tx_desc->u.flags.tx_ecc_err) + ring->tx_stats.tx_ecc_err++; + + if (tx_desc->u.flags.tx_pcie_read_err) { + ring->tx_stats.tx_pcie_read_err++; + dev_info(ring->dev, "**** tx_desc: flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d], sopv[%d], eopv[%d], tso[%d], l3chk[%d], l3oft[%d], l4chk[%d], l4oft[%d], pld[%d], mop[%d], sop[%d], mss[%d],mopa[%lld],sopa[%lld]\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, + tx_desc->tso, tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, + tx_desc->l4_ofst, tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + 
} + + tx_desc->u.val = 0; + tx_desc->vp = 0; + tx_desc->event_trigger = 0; + tx_desc->chain = 0; + tx_desc->transmit_type = 0; + tx_desc->sop_valid = 0; + tx_desc->eop_valid = 0; + tx_desc->tso = 0; + tx_desc->l3_csum = 0; + tx_desc->l3_ofst = 0; + tx_desc->l4_csum = 0; + tx_desc->l4_ofst = 0; + tx_desc->pld_ofst = 0; + tx_desc->mop_cnt = 0; + tx_desc->sop_cnt = 0; + tx_desc->mss = 0; + tx_desc->buffer_mop_addr = 0; + tx_desc->buffer_sop_addr = 0; +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *clean_ring = NULL; + union ne6x_rx_desc *rx_desc = NULL; + int i, cq_num, off_idx, ntc; + int budget = napi_budget; + int last_expect = 0; + int total = 0; + + do { + cq_desc = NE6X_CQ_DESC(cq_ring, cq_ring->next_to_use); + cq_num = cq_desc->num; + if (!cq_num) + break; + + dma_rmb(); + cq_ring->stats.packets += cq_num; + + if (cq_desc->ctype) { + clean_ring = q_vector->rx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.rx_cq[i].cq_rx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_err(cq_ring->netdev, "ne6xpf: cqrx err, need debug! cq: %d, rx: %d\n", + off_idx, last_expect); + netdev_err(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, rxq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + rx_desc = NE6X_RX_DESC(clean_ring, off_idx); + rx_desc->wb.u.val = cq_desc->payload.rx_cq[i].cq_rx_stats; + rx_desc->wb.pkt_len = cq_desc->payload.rx_cq[i].cq_rx_len; + if (rx_desc->wb.pkt_len > clean_ring->rx_buf_len) { + if (!rx_desc->wb.u.flags.rx_eop) + rx_desc->wb.pkt_len = clean_ring->rx_buf_len; + else + rx_desc->wb.pkt_len = rx_desc->wb.pkt_len % + clean_ring->rx_buf_len ? + rx_desc->wb.pkt_len % + clean_ring->rx_buf_len : + clean_ring->rx_buf_len; + } + + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.rx_num += cq_num; + } else { + clean_ring = q_vector->tx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.tx_cq[i].cq_tx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_info(cq_ring->netdev, "ne6xpf: cqtx err, need debug! cq: %d, tx: %d\n", + off_idx, last_expect); + netdev_info(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, txq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + tx_desc = NE6X_TX_DESC(clean_ring, off_idx); + tx_desc->u.val = cq_desc->payload.tx_cq[i].cq_tx_stats; + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.tx_num += cq_num; + } + + clean_ring->cq_last_expect = last_expect; + cq_ring->cq_stats.cq_num++; + + /* clean cq desc */ + cq_desc->num = 0; + ntc = cq_ring->next_to_use + 1; + ntc = (ntc < cq_ring->count) ? 
ntc : 0; + cq_ring->next_to_use = ntc; + prefetch(NE6X_CQ_DESC(cq_ring, ntc)); + + budget--; + total++; + } while (likely(budget)); + + if (NE6X_DESC_UNUSED(cq_ring) < 1024) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + return total; +} + +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = NE6X_DESC_UNUSED(rx_ring); + struct ne6x_rx_buf *rx_buffer = NULL; + struct sk_buff *skb = rx_ring->skb; + union ne6x_rx_desc *rx_desc = NULL; + struct rx_hdr_info rx_hdr; + bool failure = false; + unsigned int size; + u8 rx_status; + + while (likely(total_rx_packets < (unsigned int)budget)) { + if (cleaned_count >= NE6X_RX_BUFFER_WRITE) { + failure = failure || ne6x_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = NE6X_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_status = rx_desc->wb.u.val; + if (!rx_status) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + if (unlikely(ne6x_rx_is_programming_status(rx_status))) { + rx_ring->rx_stats.rx_err++; + ne6x_clean_programming_status(rx_ring, rx_desc, rx_status); + cleaned_count++; + continue; + } + + size = rx_desc->wb.pkt_len; + rx_buffer = ne6x_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + ne6x_add_rx_frag(rx_ring, rx_buffer, skb, size); + else + skb = ne6x_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + ne6x_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + if (ne6x_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + if (ne6x_cleanup_headers(rx_ring, skb, rx_desc)) { + skb = NULL; + continue; + } + + ne6x_get_rx_head_info(skb, &rx_hdr); + pskb_trim(skb, skb->len - 16); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + ne6x_process_skb_fields(rx_ring, rx_desc, skb, &rx_hdr); + + ne6x_receive_skb(rx_ring, skb); + skb = NULL; + + rx_desc->wb.u.val = 0; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? 
budget : (int)total_rx_packets; +} + +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct ne6x_tx_desc *eop_desc = NULL; + u16 i = tx_ring->next_to_clean; + struct ne6x_tx_desc *tx_desc; + struct ne6x_tx_buf *tx_buf; + unsigned int budget = 256; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = NE6X_TX_DESC(tx_ring, i); + + if (unlikely(tx_buf->jumbo_frame)) { + tx_buf->napi_budget += napi_budget; + if (!tx_buf->jumbo_finsh) + return !!budget; + + napi_budget = tx_buf->napi_budget; + } + + do { + eop_desc = tx_buf->next_to_watch; + if (!eop_desc) + break; + + prefetchw(&tx_buf->skb->users); + + if (!eop_desc->u.val) + break; + + dma_rmb(); + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + tx_buf->jumbo_frame = 0; + tx_buf->jumbo_finsh = 0; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + if (unlikely(tx_buf->jumbo_frame && !tx_buf->jumbo_finsh)) + break; + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (total_packets) { + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (NE6X_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(NE6X_ADPT_DOWN, comm->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_q; + } + } + } + + return !!budget; +} + +static inline int ne6x_xmit_descriptor_count(struct sk_buff *skb) +{ + int count = 0; + + count = 1; + count += skb_shinfo(skb)->nr_frags; + + return count; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(NE6X_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static inline u16 ne6x_gso_get_seg_hdrlen(struct sk_buff *skb) +{ + u16 gso_hdr_len; + + gso_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (unlikely(skb->encapsulation)) + gso_hdr_len = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + + return gso_hdr_len; +} + +static int ne6x_tso(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + u8 hdrlen = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) + return 0; + + hdrlen = ne6x_gso_get_seg_hdrlen(skb); + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* update gso_segs and bytecount */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + ptx_tag->tag_mss = skb_shinfo(skb)->gso_size; + + return 1; +} + +static void ne6x_tx_prepare_vlan_flags(struct ne6x_ring *tx_ring, + struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + + /* nothing left to do, software offloaded VLAN */ + if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) + return; + + /* the VLAN ethertype/tpid is determined by adapter configuration and netdev + * feature flags, which the driver only allows either 802.1Q or 802.1ad + * VLAN offloads exclusively so we only care about the VLAN ID here + */ + if (skb_vlan_tag_present(skb)) { + if (tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + ptx_tag->tag_vlan2 = cpu_to_be16(skb_vlan_tag_get(skb)); + else if (tx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_TX) + ptx_tag->tag_vlan1 = cpu_to_be16(skb_vlan_tag_get(skb)); + } +} + +static int ne6x_tx_csum(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + tx_ring->tx_stats.csum_good++; + return 1; +} + +static inline void ne6x_tx_desc_push(struct ne6x_tx_desc *tx_desc, + dma_addr_t dma, u32 size) +{ + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->mop_cnt = size; + tx_desc->event_trigger = 1; +} + +static void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, + struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static inline void 
ne6x_fill_gso_sg(void *p, u16 offset, u16 len, struct ne6x_sg_info *sg) +{ + sg->p = p; + sg->offset = offset; + sg->len = len; +} + +static int ne6x_fill_jumbo_sgl(struct ne6x_ring *tx_ring, struct sk_buff *skb) +{ + u16 sg_max_dlen = 0, dlen = 0, len = 0, offset = 0, send_dlen = 0, total_dlen = 0; + u16 subframe = 0, send_subframe = 0, sg_avail = 0, i = 0, j = 0; + u16 gso_hdr_len = ne6x_gso_get_seg_hdrlen(skb); + struct ne6x_sg_list *sgl = tx_ring->sgl; + + WARN_ON(!sgl); + + memset(sgl, 0, sizeof(struct ne6x_sg_list)); + dlen = skb_headlen(skb) - gso_hdr_len; + sgl->mss = skb_shinfo(skb)->gso_size; + sg_max_dlen = NE6X_MAX_DATA_PER_TXD - gso_hdr_len; + sg_max_dlen = ((u16)(sg_max_dlen / sgl->mss)) * sgl->mss; + total_dlen = skb->data_len + dlen; + sgl->sgl_mss_cnt = sg_max_dlen / sgl->mss; + subframe = total_dlen / sg_max_dlen; + subframe += total_dlen % sg_max_dlen ? 1 : 0; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FST_SG_FLAG | NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + offset = gso_hdr_len; + sg_avail = sg_max_dlen; + ++send_subframe; + i++; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(skb->data, offset, len, &sgl->sg[i]); + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[j]; + + dlen = skb_frag_size(f); + offset = 0; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(f, offset, len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FRAG_FLAG; + + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + offset = 0; + } +end: + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + sgl->sg_num = ++i; + return 0; +err: + return -1; +} + +static void ne6x_fill_tx_desc(struct ne6x_tx_desc *tx_desc, u8 vp, dma_addr_t tag_dma, + dma_addr_t dma, struct ne6x_sg_info *sg) +{ + memset(tx_desc, 0, NE6X_TX_DESC_SIZE); + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->buffer_sop_addr = (sg->flag & NE6X_SG_SOP_FLAG) ? cpu_to_le64(tag_dma) : 0; + tx_desc->mop_cnt = sg->len; + tx_desc->event_trigger = 1; + tx_desc->vp = vp; + tx_desc->sop_valid = (sg->flag & NE6X_SG_SOP_FLAG) ? 1u : 0u; + tx_desc->eop_valid = (sg->flag & NE6X_SG_EOP_FLAG) ? 1u : 0u; + tx_desc->sop_cnt = (sg->flag & NE6X_SG_SOP_FLAG) ? 
32 : 0; + if (tx_desc->eop_valid) { + tx_desc->sop_cnt = tx_desc->mop_cnt; + tx_desc->buffer_sop_addr = tx_desc->buffer_mop_addr; + tx_desc->mop_cnt = 4; + } +} + +static void ne6x_fill_tx_priv_tag(struct ne6x_ring *tx_ring, struct ne6x_tx_tag *tx_tag, + int mss, struct ne6x_sg_info *sg) +{ + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + + tx_tag->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + tx_tag->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + tx_tag->tag_vport = (comm->port_info >> 8) & 0xFF; + tx_tag->tag_mss = cpu_to_be16(mss); + tx_tag->tag_num = sg->base_mss_no | (sg->flag & NE6X_SG_JUMBO_FLAG) | + (sg->flag & NE6X_SG_LST_SG_FLAG) | + (sg->flag & NE6X_SG_FST_SG_FLAG); + tx_tag->tag_num = cpu_to_be16(tx_tag->tag_num); +} + +static void ne6x_xmit_jumbo(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + int j = 0; + struct ne6x_sg_list *sgl = tx_ring->sgl; + struct ne6x_sg_info *sg; + dma_addr_t dma, tag_dma; + struct sk_buff *skb = first->skb; + struct ne6x_tx_buf *tx_bi; + struct ne6x_tx_tag *tag_desc = tx_tag; + u32 i = tx_ring->next_to_use; + struct ne6x_tx_desc *tx_desc = NE6X_TX_DESC(tx_ring, i); + + for (; j < sgl->sg_num; j++) { + sg = &sgl->sg[j]; + if (likely(sg->flag & NE6X_SG_FRAG_FLAG)) { + dma = skb_frag_dma_map(tx_ring->dev, sg->p, sg->offset, sg->len, + DMA_TO_DEVICE); + } else { + dma = dma_map_single(tx_ring->dev, sg->p + sg->offset, sg->len, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_bi = &tx_ring->tx_buf[i]; + + dma_unmap_len_set(tx_bi, len, sg->len); + + dma_unmap_addr_set(tx_bi, dma, dma); + + if (sg->flag & NE6X_SG_SOP_FLAG) { + tag_dma = tag_ring->dma + tag_ring->next_to_use * NE6X_TX_PRIV_TAG_SIZE; + tag_desc = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + ne6x_fill_tx_priv_tag(tx_ring, tag_desc, sgl->mss, sg); + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + } else { + tag_dma = 0; + } + + tx_desc = NE6X_TX_DESC(tx_ring, i); + ne6x_fill_tx_desc(tx_desc, tx_ring->reg_idx, tag_dma, dma, sg); + if (++i == tx_ring->count) + i = 0; + } + tx_ring->next_to_use = i; + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + first->jumbo_finsh = 1u; + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +static void ne6x_xmit_simple(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + struct sk_buff *skb = first->skb; + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + struct ne6x_tx_desc *tx_desc, *first_desc; + unsigned int size = skb_headlen(skb); + u32 i = tx_ring->next_to_use; + struct ne6x_tx_tag *ttx_desc; + struct ne6x_tx_buf *tx_bi; + bool is_first = true; + int send_len = 0; + skb_frag_t *frag; + dma_addr_t dma; + __le64 mss = 0; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + first_desc = NE6X_TX_DESC(tx_ring, i); + tx_desc = NE6X_TX_DESC(tx_ring, i); + mss = tx_desc->mss; + tx_desc->sop_valid = 1; + tx_desc->eop_valid = 0; + tx_bi = first; + + ttx_desc = (struct ne6x_tx_tag *)tx_tag; + ttx_desc->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + ttx_desc->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + ttx_desc->tag_vport = (comm->port_info >> 8) & 0xFF; + ttx_desc->tag_mss = tx_tag->tag_mss; + ttx_desc->tag_num = 0x0; + send_len += size; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + ne6x_tx_desc_push(tx_desc, dma, size); + tx_desc->vp = tx_ring->reg_idx; + tx_desc->tso = 0x0; + tx_desc->l3_csum = 0x00; + tx_desc->l3_ofst = 0x00; + tx_desc->l4_csum = 0x00; + tx_desc->l4_ofst = 0x00; + tx_desc->pld_ofst = 0x00; + tx_desc->u.val = 0x0; + tx_desc->rsv4 = 0; + if (is_first) { + tx_desc->sop_valid = 1u; + is_first = false; + tx_desc->sop_cnt = 32; + tx_desc->buffer_sop_addr = cpu_to_le64(first->tag_dma); + } + + if (send_len == skb->len) { + tx_desc->eop_valid = 1u; + break; + } + if (++i == tx_ring->count) + i = 0; + + tx_desc = NE6X_TX_DESC(tx_ring, i); + + size = skb_frag_size(frag); + send_len += size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_buf[i]; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame) +{ + struct ne6x_tx_tag *tx_tagx = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + struct ne6x_tx_buf *first; + int tso, count; + + /* prefetch the data, we'll need it later */ + prefetch(tx_tagx); + prefetch(skb->data); + + if (!jumbo_frame) { + count = ne6x_xmit_descriptor_count(skb); + } else { + if (ne6x_fill_jumbo_sgl(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = tx_ring->sgl->sg_num; + } + /* reserve 5 descriptors to avoid tail over-write */ + if (ne6x_maybe_stop_tx(tx_ring, count + 4 + 1)) { + /* this is a hard error */ + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buf[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* record initial flags and protocol */ + + first->jumbo_frame = 0; + first->jumbo_finsh = 0; + first->tag_dma = tag_ring->dma + tag_ring->next_to_use * sizeof(struct ne6x_tx_tag); + memset(tx_tagx, 0x00, sizeof(*tx_tagx)); + + ne6x_tx_prepare_vlan_flags(tx_ring, first, tx_tagx); + + tso = ne6x_tso(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tso = ne6x_tx_csum(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tx_tagx->tag_mss = cpu_to_be16(tx_tagx->tag_mss); + + if (!jumbo_frame) { + ne6x_xmit_simple(tx_ring, first, tag_ring, tx_tagx); + } else { + first->jumbo_frame = true; + ne6x_xmit_jumbo(tx_ring, first, tag_ring, tx_tagx); + } + + return NETDEV_TX_OK; + +out_drop: + ne6x_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.h b/drivers/net/ethernet/bzwx/nce/comm/txrx.h new file mode 100644 index 00000000000000..c7c4e068af0cc0 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.h @@ -0,0 +1,478 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _TXRX_H +#define _TXRX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define NE6X_MAX_NUM_DESCRIPTORS (16 * 1024) +#define NE6X_DEFAULT_NUM_DESCRIPTORS (4 * 1024) +#define NE6X_MIN_NUM_DESCRIPTORS 64 +#define NE6X_REQ_DESCRIPTOR_MULTIPLE 32 + +#define NE6X_MAX_BUFFER_TXD 8 +#define NE6X_MIN_TX_LEN 60 + +#define NE6X_TAIL_REG_NUM 4 +#define NE6X_RX_BUFFER_WRITE 32 /* Must be power of 2 */ + +/* The size limit for a transmit buffer in a descriptor is 15K. + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. 
+ */ +#define NE6X_MAX_READ_REQ_SIZE 4096 +#define NE6X_MAX_DATA_PER_TXD (15500 - 32 - 4 - 1) +#define NE6X_MAX_DATA_PER_TXD_ALIGNED \ + (NE6X_MAX_DATA_PER_TXD & ~(NE6X_MAX_READ_REQ_SIZE - 1)) + +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define NE6X_PACKET_HDR_PAD ETH_HLEN +#define NE6X_RXBUFFER_256 256 +#define NE6X_RXBUFFER_2048 2048 +#define NE6X_RXBUFFER_4096 4096 /* Used for large frames w/ padding */ +/*CIU buffer max len is 15k*/ +#define NE6X_MAX_RXBUFFER 15360 /* largest size for single descriptor */ +#define NE6X_MIN_MTU_SIZE 128 +#define NE6X_RX_HDR_SIZE NE6X_RXBUFFER_256 + +#define NE6X_TX_PRIV_TAG_SIZE 32 +#define NE6X_TX_DESC_SIZE 32 +/* iterator for handling rings in ring container */ +#define ne6x_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define NE6X_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define NE6X_RX_DESC(R, i) (&(((union ne6x_rx_desc *)((R)->desc))[i])) +#define NE6X_TX_DESC(R, i) (&(((struct ne6x_tx_desc *)((R)->desc))[i])) +#define NE6X_TX_TAG(R, i) (&(((struct ne6x_tx_tag *)((R)->desc))[i])) +#define NE6X_CQ_DESC(R, i) (&(((struct ne6x_cq_desc *)((R)->desc))[i])) + +#define NE6X_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? \ + 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +struct ne6x_tx_desc_status { + /* pkt drop */ + u8 tx_drop_addr : 1; + u8 rsv3 : 1; + u8 rsv2 : 1; + /* normal */ + u8 tx_done : 1; + /* ecc error */ + u8 tx_ecc_err : 1; + u8 rsv1 : 1; + u8 rsv0 : 1; + /* pcie error */ + u8 tx_pcie_read_err : 1; +}; + +struct ne6x_tx_desc { + union { + /* Hardware write back*/ + struct ne6x_tx_desc_status flags; + u8 val; + } u; + + u8 rsv0 : 1; + u8 vp : 7; + u8 event_trigger : 1; + u8 chain : 1; + u8 transmit_type : 2; + u8 sop_valid : 1; + u8 eop_valid : 1; + u8 tso : 1; + u8 rsv1 : 1; + u8 rsv2; + u8 rsv3; + + u8 l3_csum : 1; + u8 l3_ofst : 7; + u8 l4_csum : 1; + u8 l4_ofst : 7; + u8 pld_ofst; + + __le64 mop_cnt : 24; + __le64 sop_cnt : 16; + __le64 rsv4 : 8; + __le64 mss : 16; + __le64 buffer_mop_addr; + __le64 buffer_sop_addr; +}; + +struct ne6x_tx_tag { + u8 resv0; + u8 tag_pi1 : 1; + u8 resv1 : 7; + u8 l3_csum : 1; + u8 l4_csum : 1; + u8 vxl_l3_csum : 1; + u8 vxl_l4_csum : 1; + u8 tag_resv : 3; + u8 tag_pi0 : 1; + u8 tag_vport; + u16 tag_vlan1; /* 1q vlan */ + u16 tag_vlan2; /* 1ad vlan */ + + __le64 resv2 : 32; + __le64 tag_num : 16; + __le64 tag_mss : 16; /* mss */ + + u8 l3_ofst; + u8 l4_ofst; + u16 l4_len; /* l4hdr + pld_size */ + u8 vxl_l3_ofst; + u8 vxl_l4_ofst; + u16 vxl_l4_len; /* l4hdr + pld_size */ + + __le64 resv3; +}; + +struct ne6x_tx_buf { + struct ne6x_tx_desc *next_to_watch; + struct sk_buff *skb; + u32 bytecount; + u8 jumbo_frame; /* fragment when bytecount > 15.5KB*/ + u8 jumbo_finsh; /* when last frame of jumbo packet transmitted, set it 1 */ + u16 rsv; + int napi_budget; /* when bytecount > 15.5KB, accumulating NPAI trigger count + * in transmit irq handler + */ + u16 gso_segs; + dma_addr_t tag_dma; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct ne6x_rx_desc_status { + u8 rx_mem_err : 1; /* MOP_MEM_ADDR/SOP_MEM_ADDR/MOP_MEM_LEN=0, pkt need drop */ + u8 rx_mem_ovflow : 1; /* SOP_MEM_OVFLOW ==1, mop have pkt */ + u8 rsv : 1; + u8 rx_eop : 1; /* EOP flag */ + u8 rx_csum_err : 1; /* checksum error */ + u8 rx_err : 1; /* Not enough descriptors */ + u8 rx_mem_used : 1; /* MEM_USED, Normal */ + u8 pd_type : 1; /* 0 ingress pd, 1 egress pd */ +}; + +#define NE6X_RX_DESC_STATUS_EOF_SHIFT 3 +#define 
NE6X_RX_DESC_STATUS_ERR_SHIFT 0 + +/* Receive Descriptor */ +union ne6x_rx_desc { + struct { + u8 rsv3; + u8 rsv2 : 1; + u8 vp : 7; + __le16 mop_mem_len; + __le16 sop_mem_len; + __le16 rsv1; + __le64 buffer_sop_addr; + __le64 buffer_mop_addr; + + __le64 rsv0; + } w; /* write */ + + struct { + union { + struct ne6x_rx_desc_status flags; + u8 val; + } u; + u8 rsv2 : 1; + u8 vp : 7; + u8 pd[24]; + __le16 rsv0; + __le16 rsv1; + __le16 pkt_len; + } wb; /* Writeback */ +}; + +struct ne6x_tx_cq_desc { + u8 cq_tx_stats; + u16 cq_tx_offset; +} __packed; + +struct ne6x_rx_cq_desc { + u8 cq_rx_stats; + u16 cq_rx_len; + u16 cq_rx_offset; +} __packed; + +struct ne6x_cq_desc { + u8 ctype : 1; + u8 rsv0 : 3; + u8 num : 4; + u8 rsv1; + + union { + struct ne6x_tx_cq_desc tx_cq[10]; + struct ne6x_rx_cq_desc rx_cq[6]; + u8 data[30]; + } payload; +}; + +struct ne6x_rx_buf { + dma_addr_t dma; + struct page *page; + u32 page_offset; + u16 pagecnt_bias; +}; + +struct ne6x_q_stats { + u64 packets; + u64 bytes; +}; + +struct ne6x_txq_stats { + u64 restart_q; + u64 tx_busy; + u64 tx_linearize; + u64 csum_err; + u64 csum_good; + u64 tx_pcie_read_err; + u64 tx_ecc_err; + u64 tx_drop_addr; +}; + +struct ne6x_rxq_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buf_failed; + u64 page_reuse_count; + u64 csum_err; + u64 csum_good; + u64 rx_mem_error; + u64 rx_err; +}; + +struct ne6x_cq_stats { + u64 cq_num; + u64 tx_num; + u64 rx_num; +}; + +#define NE6X_SG_SOP_FLAG BIT(0) +#define NE6X_SG_EOP_FLAG BIT(1) +#define NE6X_SG_FST_SG_FLAG BIT(13) +#define NE6X_SG_LST_SG_FLAG BIT(14) +#define NE6X_SG_JUMBO_FLAG BIT(15) +#define NE6X_SG_FRAG_FLAG BIT(4) +#define NE6X_MAX_DESC_NUM_PER_SKB 16 + +struct ne6x_sg_info { + void *p; + u16 offset; + u16 len; + u16 flag; + u16 base_mss_no; +}; + +struct ne6x_sg_list { + u16 sg_num; + u16 mss; + u16 sgl_mss_cnt; + struct ne6x_sg_info sg[NE6X_MAX_DESC_NUM_PER_SKB]; +}; + +/* descriptor ring, associated with a adapter */ +struct ne6x_ring { + /* CL1 - 1st cacheline starts here */ + void *adpt; + struct ne6x_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + struct ne6x_q_vector *q_vector; /* Backreference to associated vector */ + + u64 __iomem *tail; + + struct ne6x_sg_list *sgl; + + union { + struct ne6x_tx_buf *tx_buf; + struct ne6x_rx_buf *rx_buf; + }; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + u16 cq_last_expect; + + u16 queue_index; /* Queue number of ring */ + u16 rx_buf_len; + + /* stats structs */ + struct ne6x_q_stats stats; + struct u64_stats_sync syncp; + + union { + struct ne6x_txq_stats tx_stats; + struct ne6x_rxq_stats rx_stats; + struct ne6x_cq_stats cq_stats; + }; + + struct rcu_head rcu; /* to avoid race on free */ + dma_addr_t dma; /* physical address of ring */ + unsigned int size; /* length of descriptor ring in bytes */ + struct sk_buff *skb; /* When ne6x_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * ne6x_clean_rx_ring_irq() is called + * for this ring. 
+ */ +} ____cacheline_internodealigned_in_smp; + +struct ne6x_ring_container { + /* head of linked-list of rings */ + struct ne6x_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; +}; + +union rx_ol_flags { + u32 ol_flags; /* Offload Feature Bits. */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 rx_tnl_csum :1; + u32 rsv0 :1; + u32 tag_num :8; + u32 rsv1 :6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 rsv1 :6; + u32 tag_num :8; + u32 rsv0 :1; + u32 rx_tnl_csum :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; +#endif + } flag_bits; +}; + +struct rx_hdr_info { + union rx_ol_flags ol_flag; + u32 rss_hash; /* RSS Hash Value */ + u32 vlan_tci_outer:16; /* VLAN Outer Tag Control Identifier */ + u32 vlan_tci:16; /* VLAN Tag Control Identifier */ +}; + +#define NE6X_INT_NAME_STR_LEN (IFNAMSIZ + 16) + +/* struct that defines an interrupt vector */ +struct ne6x_q_vector { + void *adpt; + + u16 v_idx; /* index in the adpt->q_vector array. 
*/ + u16 reg_idx; + + struct napi_struct napi; + + struct ne6x_ring_container rx; + struct ne6x_ring_container tx; + struct ne6x_ring_container cq; + struct ne6x_ring_container tg; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[NE6X_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) + +static inline unsigned int ne6x_rx_pg_order(struct ne6x_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define ne6x_rx_pg_size(_ring) (PAGE_SIZE << ne6x_rx_pg_order(_ring)) + +static inline struct netdev_queue *txring_txq(const struct ne6x_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget); +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget); +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget); +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame); +void ne6x_tail_update(struct ne6x_ring *ring, int val); +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring); +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring); +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring); +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring); +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring); + +#endif + +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); diff --git a/drivers/net/ethernet/bzwx/nce/comm/version.h b/drivers/net/ethernet/bzwx/nce/comm/version.h new file mode 100644 index 00000000000000..9affdb9803b1f6 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _VERSION_H +#define _VERSION_H + +#define VERSION "1.0.4" + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h new file mode 100644 index 00000000000000..1206d8ab3cfd19 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_H +#define _NE6X_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "feature.h" +#include "txrx.h" +#include "common.h" +#include "ne6x_txrx.h" +#include "ne6x_ethtool.h" +#include "ne6x_procfs.h" +#include "ne6x_virtchnl_pf.h" +#include "version.h" + +#define NE6X_MAX_VP_NUM 64 +#define NE6X_PF_VP0_NUM 64 +#define NE6X_PF_VP1_NUM 65 +#define NE6X_MAILBOX_VP_NUM NE6X_PF_VP0_NUM +#define NE6X_MAX_MSIX_NUM 72 +#define NE6X_MIN_MSIX 2 + +#define NE6X_NIC_INT_VP 71 +#define NE6X_NIC_INT_START_BIT 42 + +#define wr64(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr0) + (reg))) +#define rd64(a, reg) \ + readq((void __iomem *)((a)->hw_addr0) + (reg)) +#define wr64_bar4(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr4) + (reg))) +#define rd64_bar4(a, reg) \ + readq((void __iomem *)((a)->hw_addr4) + (reg)) + +#define ne6x_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ne6x_get_vf_by_id(pf, vf_id) (&((pf)->vf[vf_id])) + +#define ADPT_PPORT(adpt) ((adpt)->port_info->hw_port_id) +#define ADPT_LPORT(adpt) ((adpt)->port_info->lport) +#define ADPT_VPORT(adpt) ((adpt)->vport) +#define ADPT_VPORTCOS(adpt) ((adpt)->base_queue + 160) + +enum ne6x_adapter_type { + NE6X_ADPT_PF = 0, + NE6X_ADPT_VF, +}; + +enum ne6x_adapter_flags { + NE6X_ADPT_F_DISABLE_FW_LLDP, + NE6X_ADPT_F_LINKDOWN_ON_CLOSE, + NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, + NE6X_ADPT_F_DDOS_SWITCH, + NE6X_ADPT_F_ACL, + NE6X_ADPT_F_TRUST_VLAN, + NE6X_ADPT_F_NBITS /* must be last */ +}; + +enum ne6x_pf_state { + NE6X_TESTING, + NE6X_DOWN, + NE6X_SERVICE_SCHED, + NE6X_INT_INIT_DOWN, + NE6X_CLIENT_SERVICE_REQUESTED, + NE6X_LINK_POOLING, + NE6X_CONFIG_BUSY, + NE6X_TIMEOUT_RECOVERY_PENDING, + NE6X_PF_RESET_REQUESTED, + NE6X_CORE_RESET_REQUESTED, + NE6X_GLOBAL_RESET_REQUESTED, + NE6X_RESET_INTR_RECEIVED, + NE6X_DOWN_REQUESTED, + NE6X_VF_DIS, + NE6X_MAILBOXQ_EVENT_PENDING, + NE6X_PF_INTX, + NE6X_PF_MSI, + NE6X_PF_MSIX, + NE6X_FLAG_SRIOV_ENA, + NE6X_REMOVE, + NE6X_STATE_NBITS /* must be last */ +}; + +enum { + NE6X_ETHTOOL_FLASH_810_LOADER = 0, + NE6X_ETHTOOL_FLASH_810_APP = 1, + NE6X_ETHTOOL_FLASH_807_APP = 2, + NE6X_ETHTOOL_FLASH_NP = 3, + NE6X_ETHTOOL_FLASH_PXE = 4, + NE6X_ETHTOOL_FRU = 0xf2, +}; + +/* MAC addr list head node struct */ +struct mac_addr_head { + struct list_head list; + struct mutex mutex; /* mutex */ +}; + +/* MAC addr list node struct */ +struct mac_addr_node { + struct list_head list; + u8 addr[32]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_FW_VER_NORMAL = 0x0, + NE6X_FW_VER_WHITELIST = 0x100, +}; + +struct ne6x_lump_tracking { + u16 num_entries; + u16 list[]; +}; + +struct ne6x_hw_port_stats { + u64 mac_rx_eth_byte; + u64 mac_rx_eth; + u64 mac_rx_eth_undersize; + u64 mac_rx_eth_crc; + u64 mac_rx_eth_64b; + u64 mac_rx_eth_65_127b; + u64 mac_rx_eth_128_255b; + u64 mac_rx_eth_256_511b; + u64 mac_rx_eth_512_1023b; + u64 mac_rx_eth_1024_15360b; + u64 mac_tx_eth_byte; + u64 mac_tx_eth; + u64 mac_tx_eth_undersize; + u64 mac_tx_eth_64b; + u64 mac_tx_eth_65_127b; + u64 mac_tx_eth_128_255b; + u64 mac_tx_eth_256_511b; + u64 mac_tx_eth_512_1023b; + u64 mac_tx_eth_1024_15360b; +}; + +/* struct that defines a adapter, associated with a dev */ +struct ne6x_adapter { + struct ne6x_adapt_comm comm; + struct net_device *netdev; + struct 
ne6x_pf *back; /* back pointer to PF */ + struct ne6x_port_info *port_info; /* back pointer to port_info */ + struct ne6x_ring **rx_rings; /* Rx ring array */ + struct ne6x_ring **tx_rings; /* Tx ring array */ + struct ne6x_ring **cq_rings; /* Tx ring array */ + struct ne6x_ring **tg_rings; /* Tx tag ring array */ + struct ne6x_q_vector **q_vectors; /* q_vector array */ + + /* used for loopback test */ + char *send_buffer; + wait_queue_head_t recv_notify; + u8 recv_done; + + irqreturn_t (*irq_handler)(int irq, void *data); + + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + u16 num_q_vectors; + u16 base_vector; /* IRQ base for OS reserved vectors */ + enum ne6x_adapter_type type; + struct ne6x_vf *vf; /* VF associated with this adapter */ + u16 idx; /* software index in pf->adpt[] */ + u16 max_frame; + u16 rx_buf_len; + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct ne6x_eth_stats eth_stats; + struct ne6x_eth_stats eth_stats_offsets; + struct ne6x_rss_info rss_info; + int rss_size; + + bool irqs_ready; + bool current_isup; /* Sync 'link up' logging */ + u16 current_speed; + u16 vport; + u16 num_queue; /* Used queues */ + u16 base_queue; /* adapter's first queue in hw array */ + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_cq_desc; + u16 num_tg_desc; + + u32 hw_feature; + bool netdev_registered; + + /* unicast MAC head node */ + struct mac_addr_head uc_mac_addr; + /* multicast MAC head node */ + struct mac_addr_head mc_mac_addr; + + struct work_struct set_rx_mode_task; + + struct ne6x_hw_port_stats stats; + DECLARE_BITMAP(flags, NE6X_ADPT_F_NBITS); + + struct list_head vlan_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + + /* aRFS members only allocated for the PF ADPT */ +#define NE6X_MAX_RFS_FILTERS 0xFFFF +#define NE6X_MAX_ARFS_LIST 1024 +#define NE6X_ARFS_LST_MASK (NE6X_MAX_ARFS_LIST - 1) + struct hlist_head *arfs_fltr_list; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ + atomic_t *arfs_last_fltr_id; +} ____cacheline_internodealigned_in_smp; + +struct ne6x_dev_eeprom_info { + u8 vendor_id[3]; + u8 ocp_record_version; + u8 max_power_s0; + u8 max_power_s5; + u8 hot_card_cooling_passive_tier; + u8 cold_card_cooling_passive_tier; + u8 cooling_mode; + u16 hot_standby_airflow_require; + u16 cold_standby_airflow_require; + u8 uart_configuration_1; + u8 uart_configuration_2; + u8 usb_present; + u8 manageability_type; + u8 fru_write_protection; + u8 prog_mode_power_state_supported; + u8 hot_card_cooling_active_tier; + u8 cold_card_cooling_active_tier; + u8 transceiver_ref_power_Level; + u8 transceiver_ref_temp_Level; + u8 card_thermal_tier_with_local_fan_fail; + u16 product_mode; + u8 is_pcie_exist; + u32 logic_port_to_phyical; + u8 resv[3]; + u8 number_of_physical_controllers; + u8 control_1_udid[16]; + u8 control_2_udid[16]; + u8 control_3_udid[16]; + u8 control_4_udid[16]; + u32 hw_feature; + u32 hw_flag; + u8 port_0_mac[6]; + u8 port_1_mac[6]; + u8 port_2_mac[6]; + u8 port_3_mac[6]; + u8 rsv[9]; + u32 spd_verify_value; +} __packed; + +struct ne6x_hw { + u64 __iomem *hw_addr0; + u64 __iomem *hw_addr2; + u64 __iomem *hw_addr4; + + struct ne6x_port_info *port_info; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 dvm_ena; /* double vlan enable */ + struct ne6x_pf *back; + 
struct ne6x_bus_info bus; + u16 pf_port; + + u32 expect_vp; + u32 max_queue; + + struct ne6x_mbx_snapshot mbx_snapshot; + u8 ne6x_mbx_ready_to_send[64]; +}; + +#define ne6x_hw_to_dev(ptr) (&(container_of((ptr), struct ne6x_pf, hw))->pdev->dev) + +struct ne6x_firmware_ver_info { + u32 firmware_soc_ver; + u32 firmware_np_ver; + u32 firmware_pxe_ver; +}; + +/* struct that defines the Ethernet device */ +struct ne6x_pf { + struct pci_dev *pdev; + + /* OS reserved IRQ details */ + struct msix_entry *msix_entries; + u16 ctrl_adpt_idx; /* control adapter index in pf->adpt array */ + + struct ne6x_adapter **adpt; /* adapters created by the driver */ + + struct mutex switch_mutex; /* switch_mutex */ + struct mutex mbus_comm_mutex; /* mbus_comm_mutex */ + struct timer_list serv_tmr; + struct timer_list linkscan_tmr; + unsigned long service_timer_period; + struct work_struct serv_task; + struct work_struct linkscan_work; + + /* Virtchnl/SR-IOV config info */ + struct ne6x_vf *vf; + u16 num_alloc_vfs; + u16 num_qps_per_vf; + + u16 next_adpt; /* Next free slot in pf->adpt[] - 0-based! */ + u16 num_alloc_adpt; + + DECLARE_BITMAP(state, NE6X_STATE_NBITS); + + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + struct ne6x_firmware_ver_info verinfo; + struct ne6x_dev_eeprom_info sdk_spd_info; + + struct ne6x_hw hw; + struct ne6x_lump_tracking *irq_pile; +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6x_dbg_pf; + struct dentry *ne6x_dbg_info_pf; +#endif /* CONFIG_DEBUG_FS */ + struct proc_dir_entry *ne6x_proc_pf; + struct list_head key_filter_list; + spinlock_t key_list_lock; /* Lock to protect accesses to key filter */ + + char link_intname[NE6X_INT_NAME_STR_LEN]; + char mailbox_intname[NE6X_INT_NAME_STR_LEN]; + bool link_int_irq_ready; + bool mailbox_int_irq_ready; + bool is_fastmode; + u32 hw_flag; + u32 dump_info; + u16 dev_type; +}; + +static inline void ne6x_adpt_setup_irqhandler(struct ne6x_adapter *adpt, + irqreturn_t (*irq_handler)(int, void *)) +{ + adpt->irq_handler = irq_handler; +} + +struct ne6x_netdev_priv { + struct ne6x_adapter *adpt; +}; + +static inline bool ne6x_is_supported_port_vlan_proto(struct ne6x_hw *hw, + u16 vlan_proto) +{ + bool is_supported = false; + + switch (vlan_proto) { + case ETH_P_8021Q: + is_supported = true; + break; + case ETH_P_8021AD: + if (hw->dvm_ena) + is_supported = true; + break; + } + + return is_supported; +} + +static inline struct ne6x_pf *ne6x_netdev_to_pf(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt->back; +} + +static inline struct ne6x_adapter *ne6x_netdev_to_adpt(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt; +} + +#define NE6X_VLAN(tpid, vid, prio) \ + ((struct ne6x_vlan){ tpid, vid, prio }) + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt); + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value); +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr); +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable); +void ne6x_update_pf_stats(struct ne6x_adapter *adpt); +void ne6x_service_event_schedule(struct ne6x_pf *pf); + +void ne6x_down(struct ne6x_adapter *adpt); +int ne6x_up(struct ne6x_adapter *adpt); +int ne6x_adpt_configure(struct ne6x_adapter *adpt); +void ne6x_adpt_close(struct ne6x_adapter *adpt); + +int ne6x_alloc_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_tx(struct 
ne6x_adapter *adpt); +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt); + +int ne6x_close(struct net_device *netdev); +int ne6x_open(struct net_device *netdev); +int ne6x_adpt_open(struct ne6x_adapter *adpt); +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt); +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt); +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors); +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt); +bool netif_is_ne6x(struct net_device *dev); + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate); + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +void ne6x_sync_features(struct net_device *netdev); + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf); +void ne6x_linkscan_schedule(struct ne6x_pf *pf); + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c new file mode 100644 index 00000000000000..ed814d666b2dec --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +#ifdef CONFIG_RFS_ACCEL + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add); + +static int ne6x_dev_add_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + u32 table_id = 0xffffffff; + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + dev_info(dev, "add: vport: %d %x %x %x %x %d %d rxq_id:%d\n", adpt->vport, + input->ip.v4.dst_ip, input->ip.v4.src_ip, input->ip.v4.dst_port, + input->ip.v4.src_port, input->ip.v4.pi, input->ip.v4.proto, input->q_index); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + /* hash data */ + memcpy(&fster.data, &input->data, sizeof(fster.data)); + + /* flow steer info */ + for (index = 0; index < 24; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + + if (ret == -ENOENT) { + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster), &table_id); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "insert flow steer table fail %02x\n", + ADPT_LPORT(adpt)); + } else { + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_ARFS_TABLE, result.key_index + 8, + (u32 *)&fster.data, sizeof(fster.data)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "update flow steer table fail ret:%d\n", + ret); + } + + return 0; +} + +static int ne6x_dev_del_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + dev_info(dev, "del: vport: %d %x %x %x %x %d %d rxq_id:%d\n", + adpt->vport, input->ip.v4.dst_ip, input->ip.v4.src_ip, input->ip.v4.dst_port, + input->ip.v4.src_port, input->ip.v4.pi, input->ip.v4.proto, input->q_index); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + + /* flow steer info */ + for (index = 0; index < 16; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + if (!ret) { + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_ARFS_TABLE, + (u32 *)&fster.ip, sizeof(fster.ip)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "delete flow steer table fail ret:%d\n", + ret); + } else { + dev_err(ne6x_pf_to_dev(adpt->back), "search flow steer table fail ret:%d\n", ret); + } + return 0; +} + +static bool ne6x_is_arfs_active(struct ne6x_adapter *adpt) +{ + return !!adpt->arfs_fltr_list; +} + +static bool +ne6x_arfs_is_flow_expired(struct ne6x_adapter *adpt, struct ne6x_arfs_entry *arfs_entry) +{ +#define NE6X_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) + if (rps_may_expire_flow(adpt->netdev, arfs_entry->fltr_info.q_index, + arfs_entry->flow_id, + arfs_entry->fltr_info.fltr_id)) + return true; + + /* expiration timer only used for UDP filters */ + if (arfs_entry->fltr_info.flow_type != 
NE6X_FLTR_PTYPE_NONF_IPV4_UDP && + arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + return false; + + return time_in_range64(arfs_entry->time_activated + + NE6X_ARFS_TIME_DELTA_EXPIRATION, + arfs_entry->time_activated, get_jiffies_64()); +} + +static void +ne6x_arfs_update_flow_rules(struct ne6x_adapter *adpt, u16 idx, + struct hlist_head *add_list, + struct hlist_head *del_list) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + /* go through the aRFS hlist at this idx and check for needed updates */ + hlist_for_each_entry_safe(e, n, &adpt->arfs_fltr_list[idx], list_entry) { + /* check if filter needs to be added to HW */ + if (e->fltr_state == NE6X_ARFS_INACTIVE) { + enum ne6x_fltr_ptype flow_type = e->fltr_info.flow_type; + struct ne6x_arfs_entry_ptr *ep = + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); + + if (!ep) + continue; + INIT_HLIST_NODE(&ep->list_entry); + /* reference aRFS entry to add HW filter */ + ep->arfs_entry = e; + hlist_add_head(&ep->list_entry, add_list); + e->fltr_state = NE6X_ARFS_ACTIVE; + /* expiration timer only used for UDP flows */ + if (flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + e->time_activated = get_jiffies_64(); + } else if (e->fltr_state == NE6X_ARFS_ACTIVE) { + /* check if filter needs to be removed from HW */ + if (ne6x_arfs_is_flow_expired(adpt, e)) { + /* remove aRFS entry from hash table for delete + * and to prevent referencing it the next time + * through this hlist index + */ + hlist_del(&e->list_entry); + e->fltr_state = NE6X_ARFS_TODEL; + /* save reference to aRFS entry for delete */ + hlist_add_head(&e->list_entry, del_list); + } + } + } +} + +static int ne6x_arfs_add_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *add_list_head) +{ + struct ne6x_arfs_entry_ptr *ep; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { + int result; + + result = ne6x_dev_add_fster_rules(adpt, &ep->arfs_entry->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, ep->arfs_entry, true); + else + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, ep->arfs_entry->fltr_state, + ep->arfs_entry->fltr_info.fltr_id, + ep->arfs_entry->flow_id, + ep->arfs_entry->fltr_info.q_index); + + hlist_del(&ep->list_entry); + devm_kfree(dev, ep); + } + + return 0; +} + +static int ne6x_arfs_del_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *del_list_head) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { + int result; + + result = ne6x_dev_del_fster_rules(adpt, &e->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, e, false); + else + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, e->fltr_state, e->fltr_info.fltr_id, + e->flow_id, e->fltr_info.q_index); + + /* The aRFS hash table is no longer referencing this entry */ + hlist_del(&e->list_entry); + devm_kfree(dev, e); + } + + return 0; +} + +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) +{ + struct ne6x_adapter *pf_adpt; + unsigned int i; + u8 idx = 0; + + ne6x_for_each_pf(pf, idx) { + HLIST_HEAD(tmp_del_list); + HLIST_HEAD(tmp_add_list); + + pf_adpt = pf->adpt[idx]; + + if (!pf_adpt) + continue; + + 
if (unlikely(!(pf_adpt->netdev->features & NETIF_F_NTUPLE))) + continue; + + if (!ne6x_is_arfs_active(pf_adpt)) + continue; + + spin_lock_bh(&pf_adpt->arfs_lock); + /* Once we process aRFS for the PF ADPT get out */ + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + ne6x_arfs_update_flow_rules(pf_adpt, i, &tmp_add_list, + &tmp_del_list); + spin_unlock_bh(&pf_adpt->arfs_lock); + + /* use list of ne6x_arfs_entry(s) for delete */ + ne6x_arfs_del_flow_rules(pf_adpt, &tmp_del_list); + + /* use list of ne6x_arfs_entry(s) for add */ + ne6x_arfs_add_flow_rules(pf_adpt, &tmp_add_list); + } +} + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add) +{ + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = adpt->arfs_fltr_cntrs; + + switch (entry->fltr_info.flow_type) { + case NE6X_FLTR_PTYPE_NONF_IPV4_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV4_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv6_cnt); + break; + default: + dev_err(ne6x_pf_to_dev(adpt->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", + entry->fltr_info.flow_type); + } +} + +static bool +ne6x_arfs_cmp(struct ne6x_fster_fltr *fltr_info, const struct flow_keys *fk) +{ + bool is_v4; + + if (!fltr_info || !fk) + return false; + + is_v4 = (fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_TCP); + + if (fk->basic.n_proto == htons(ETH_P_IP) && is_v4) + return (fltr_info->ip.v4.proto == fk->basic.ip_proto && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst); + + else if (fk->basic.n_proto == htons(ETH_P_IPV6) && !is_v4) + return (fltr_info->ip.v6.proto == fk->basic.ip_proto && + fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + !memcmp(&fltr_info->ip.v6.src_ip, + &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, + &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr))); + + return false; +} + +static struct ne6x_arfs_entry * +ne6x_arfs_build_entry(struct ne6x_adapter *adpt, const struct flow_keys *fk, + u32 hash, u16 rxq_idx, u32 flow_id) +{ + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_fster_fltr *fltr_info; + u8 ip_proto; + + arfs_entry = devm_kzalloc(ne6x_pf_to_dev(adpt->back), + sizeof(*arfs_entry), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_entry) + return NULL; + + fltr_info = &arfs_entry->fltr_info; + fltr_info->q_index = rxq_idx; + fltr_info->dest_adpt = adpt->idx; + ip_proto = fk->basic.ip_proto; + + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->ip.v4.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
+ NE6X_FLTR_PTYPE_NONF_IPV4_TCP : + NE6X_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->ip.v4.src_port = fk->ports.src; + fltr_info->ip.v4.dst_port = fk->ports.dst; + fltr_info->ip.v4.proto = fk->basic.ip_proto; + fltr_info->ip.v4.pi = ADPT_LPORT(adpt); + } else { /* ETH_P_IPV6 */ + fltr_info->ip.v6.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? + NE6X_FLTR_PTYPE_NONF_IPV6_TCP : + NE6X_FLTR_PTYPE_NONF_IPV6_UDP; + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->ip.v6.src_port = fk->ports.src; + fltr_info->ip.v6.dst_port = fk->ports.dst; + fltr_info->ip.v6.proto = fk->basic.ip_proto; + fltr_info->ip.v6.pi = ADPT_LPORT(adpt); + } + fltr_info->data.tab_id = 5; + fltr_info->data.port = ADPT_VPORT(adpt); + fltr_info->data.cos = cpu_to_be16(rxq_idx); + fltr_info->data.hash = hash; + + arfs_entry->flow_id = flow_id; + fltr_info->fltr_id = + atomic_inc_return(adpt->arfs_last_fltr_id) % RPS_NO_FILTER; + + return arfs_entry; +} + +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + + if (!adpt) + return; + + netdev = adpt->netdev; + if (!netdev || !netdev->rx_cpu_rmap) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +static int ne6x_get_irq_num(struct ne6x_pf *pf, int idx) +{ + if (!pf->msix_entries) + return -EINVAL; + + return pf->msix_entries[idx].vector; +} + +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + struct ne6x_pf *pf; + int base_idx, i; + + pf = adpt->back; + + netdev = adpt->netdev; + if (!pf || !netdev || !adpt->num_q_vectors) + return -EINVAL; + + netdev_dbg(netdev, "Setup CPU RMAP: adpt type 0x%x, ifname %s, q_vectors %d\n", + adpt->type, netdev->name, adpt->num_q_vectors); + + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adpt->num_q_vectors); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + base_idx = adpt->base_vector; + for (i = 0; i < adpt->num_q_vectors; i++) { + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, ne6x_get_irq_num(pf, base_idx + i))) { + ne6x_free_cpu_rx_rmap(adpt); + return -EINVAL; + } + } + + return 0; +} + +int ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_adapter *adpt = np->adpt; + struct flow_keys fk; + struct ne6x_pf *pf; + __be16 n_proto; + u8 ip_proto; + u16 idx; + u32 hash; + int ret; + + if (unlikely(!(netdev->features & NETIF_F_NTUPLE))) + return -ENODEV; + + /* failed to allocate memory for aRFS so don't crash */ + if (unlikely(!adpt->arfs_fltr_list)) + return -ENODEV; + + pf = adpt->back; + + /* aRFS only supported on Rx queues belonging to PF ADPT */ + if (rxq_idx >= adpt->num_queue) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + n_proto = fk.basic.n_proto; + /* Support only IPV4 and IPV6 */ + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || + n_proto == htons(ETH_P_IPV6)) + ip_proto = fk.basic.ip_proto; + else + return -EPROTONOSUPPORT; + + /* Support only TCP and UDP */ + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + /* choose the aRFS list bucket based on 
skb hash */ + hash = skb_get_hash_raw(skb); + idx = skb_get_hash_raw(skb) & NE6X_ARFS_LST_MASK; + /* search for entry in the bucket */ + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry(arfs_entry, &adpt->arfs_fltr_list[idx], + list_entry) { + struct ne6x_fster_fltr *fltr_info = &arfs_entry->fltr_info; + + /* keep searching for the already existing arfs_entry flow */ + if (!ne6x_arfs_cmp(fltr_info, &fk)) + continue; + + ret = fltr_info->fltr_id; + + if (fltr_info->q_index == rxq_idx || + arfs_entry->fltr_state != NE6X_ARFS_ACTIVE) + goto out; + + /* update the queue to forward to on an already existing flow */ + fltr_info->q_index = rxq_idx; + fltr_info->data.cos = cpu_to_be16(rxq_idx); + arfs_entry->fltr_state = NE6X_ARFS_INACTIVE; + ne6x_arfs_update_active_fltr_cntrs(adpt, arfs_entry, false); + goto out_schedule_service_task; + } + + arfs_entry = ne6x_arfs_build_entry(adpt, &fk, hash, rxq_idx, flow_id); + if (!arfs_entry) { + ret = -ENOMEM; + goto out; + } + + ret = arfs_entry->fltr_info.fltr_id; + INIT_HLIST_NODE(&arfs_entry->list_entry); + hlist_add_head(&arfs_entry->list_entry, &adpt->arfs_fltr_list[idx]); +out_schedule_service_task: + ne6x_service_event_schedule(pf); +out: + spin_unlock_bh(&adpt->arfs_lock); + return ret; +} + +static int ne6x_init_arfs_cntrs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return -EINVAL; + + adpt->arfs_fltr_cntrs = kzalloc(sizeof(*adpt->arfs_fltr_cntrs), + GFP_KERNEL); + if (!adpt->arfs_fltr_cntrs) + return -ENOMEM; + + adpt->arfs_last_fltr_id = kzalloc(sizeof(*adpt->arfs_last_fltr_id), + GFP_KERNEL); + if (!adpt->arfs_last_fltr_id) { + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; + return -ENOMEM; + } + + return 0; +} + +void ne6x_init_arfs(struct ne6x_adapter *adpt) +{ + struct hlist_head *arfs_fltr_list; + unsigned int i; + + if (!adpt) + return; + + arfs_fltr_list = kcalloc(NE6X_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), + GFP_KERNEL); + if (!arfs_fltr_list) + return; + + if (ne6x_init_arfs_cntrs(adpt)) + goto free_arfs_fltr_list; + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + INIT_HLIST_HEAD(&arfs_fltr_list[i]); + + spin_lock_init(&adpt->arfs_lock); + + adpt->arfs_fltr_list = arfs_fltr_list; + + return; + +free_arfs_fltr_list: + kfree(arfs_fltr_list); +} + +void ne6x_clear_arfs(struct ne6x_adapter *adpt) +{ + struct device *dev; + unsigned int i; + struct ne6x_arfs_entry *r; + struct hlist_node *n; + HLIST_HEAD(tmp_del_list); + + if (!adpt || !adpt->back || !adpt->arfs_fltr_list) + return; + + dev = ne6x_pf_to_dev(adpt->back); + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + if (r->fltr_state == NE6X_ARFS_ACTIVE || r->fltr_state == NE6X_ARFS_TODEL) { + hlist_del(&r->list_entry); + hlist_add_head(&r->list_entry, &tmp_del_list); + } + } + spin_unlock_bh(&adpt->arfs_lock); + } + + hlist_for_each_entry_safe(r, n, &tmp_del_list, list_entry) { + ne6x_dev_del_fster_rules(adpt, &r->fltr_info, false); + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + struct ne6x_arfs_entry *r; + struct hlist_node *n; + + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + spin_unlock_bh(&adpt->arfs_lock); + } + + kfree(adpt->arfs_fltr_list); + adpt->arfs_fltr_list = NULL; + kfree(adpt->arfs_last_fltr_id); + adpt->arfs_last_fltr_id = NULL; + kfree(adpt->arfs_fltr_cntrs); + 
adpt->arfs_fltr_cntrs = NULL; +} + +void ne6x_remove_arfs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return; + + ne6x_clear_arfs(adpt); +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h new file mode 100644 index 00000000000000..a24d9f19d478f7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ARFS_H +#define _NE6X_ARFS_H + +/* protocol enumeration for filters */ +enum ne6x_fltr_ptype { + /* NONE - used for undef/error */ + NE6X_FLTR_PTYPE_NONF_NONE = 0, + NE6X_FLTR_PTYPE_NONF_IPV4_UDP, + NE6X_FLTR_PTYPE_NONF_IPV4_TCP, + NE6X_FLTR_PTYPE_NONF_IPV6_UDP, + NE6X_FLTR_PTYPE_NONF_IPV6_TCP, + NE6X_FLTR_PTYPE_MAX, +}; + +struct ne6x_fster_v4 { + __be32 rsv0[3]; + __be32 dst_ip; + __be32 rsv1[3]; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be16 rsv2; + u8 pi; + u8 proto; + u8 rsv3[24]; +}; + +#define NE6X_IPV6_ADDR_LEN_AS_U32 4 + +struct ne6x_fster_v6 { + __be32 dst_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be16 rsv0; + u8 pi; + u8 proto; + u8 rsv1[24]; +}; + +struct ne6x_fster_data { + u8 tab_id; + u8 port; + __be16 cos; + __be32 hash; + u8 rsv0[24]; +}; + +struct ne6x_fster_table { + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_search_result { + u32 key_index; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_fltr { + struct list_head fltr_node; + enum ne6x_fltr_ptype flow_type; + + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; + + /* filter control */ + u16 q_index; + u16 dest_adpt; + u8 cnt_ena; + u16 cnt_index; + u32 fltr_id; +}; + +enum ne6x_arfs_fltr_state { + NE6X_ARFS_INACTIVE, + NE6X_ARFS_ACTIVE, + NE6X_ARFS_TODEL, +}; + +struct ne6x_arfs_entry { + struct ne6x_fster_fltr fltr_info; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + struct hlist_node list_entry; + u64 time_activated; /* only valid for UDP flows */ + u32 flow_id; + /* fltr_state = 0 - NE6X_ARFS_INACTIVE: + * filter needs to be updated or programmed in HW. + * fltr_state = 1 - NE6X_ARFS_ACTIVE: + * filter is active and programmed in HW. + * fltr_state = 2 - NE6X_ARFS_TODEL: + * filter has been deleted from HW and needs to be removed from + * the aRFS hash table. 
+ */ + u8 fltr_state; +}; + +struct ne6x_arfs_entry_ptr { + struct ne6x_arfs_entry *arfs_entry; + struct hlist_node list_entry; +}; + +struct ne6x_arfs_active_fltr_cntrs { + atomic_t active_tcpv4_cnt; + atomic_t active_tcpv6_cnt; + atomic_t active_udpv4_cnt; + atomic_t active_udpv6_cnt; +}; + +#ifdef CONFIG_RFS_ACCEL +int +ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id); +void ne6x_clear_arfs(struct ne6x_adapter *adpt); +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_init_arfs(struct ne6x_adapter *adpt); +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf); +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_remove_arfs(struct ne6x_adapter *adpt); +#else +static inline void ne6x_clear_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) { } +static inline void ne6x_init_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) { } +static inline void ne6x_remove_arfs(struct ne6x_adapter *adpt) { } + +static inline int ne6x_set_cpu_rx_rmap(struct ne6x_adapter __always_unused *adpt) +{ + return 0; +} + +static inline int +ne6x_rx_flow_steer(struct net_device __always_unused *netdev, + const struct sk_buff __always_unused *skb, + u16 __always_unused rxq_idx, u32 __always_unused flow_id) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_RFS_ACCEL */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c new file mode 100644 index 00000000000000..a1e2f6aad70d28 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c @@ -0,0 +1,2409 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_debugfs.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +#define NE6X_CQ_TO_OFF_TX(__desc, __idx) \ + (((__desc)->payload.data[3 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[3 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_TX(__desc, __idx) ((__desc)->payload.data[3 * (__idx)]) + +#define NE6X_CQ_TO_LEN_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_RX(__desc, __idx) ((__desc)->payload.data[5 * (__idx)]) +#define NE6X_CQ_TO_OFF_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 3] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 4] << 8)) + +#define PARA_KEY_STRING " " +#define ARRAY_P_MAX_COUNT 140 +#define HASH_KEY_SIZE 64 +#define HASH_DATA_SIZE 64 +#define TABLE_WIDHT_BIT_512 512 +#define TABLE_WIDHT_BIT_128 128 +#define TABLE_WIDHT_BIT_64 64 +#define TABLE_WIDHT_BIT_16 16 +#define TABLE_WIDHT_BIT_256 256 +#define TABLE_WIDHT_BIT_32 32 + +#define FRU_CHECK_6ASCII(x) (((x) >> 6) == 0x2) +#define ASCII628_BASE 32 +#define FRU_6BIT_8BITLENGTH(x) (((x) * 4) / 3) + +static int table_size[] = { + TABLE_WIDHT_BIT_512, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_16, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_256, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_32 +}; + +static const struct ne6x_debug_info ne6x_device_info[] = { + {0xE220, "N5E025P2-PAUA", "25G"}, {0xE22C, "N5E025P2-NAUA", "25G"}, + {0xE221, "N5S025P2-PAUA", "25G"}, {0xE22D, "N5S025P2-NAUA", "25G"}, + {0xEA20, "N6E100P2-PAUA", "100G"}, {0xEA2C, "N6E100P2-NAUA", "100G"}, + {0xEA21, "N6S100P2-PAUA", "100G"}, {0xEA2D, "N6S100P2-NAUA", "100G"}, + {0xD221, "N6S025P2-PDUA", "25G"}, {0xDA21, "N6S100P2-PDUA", "100G"}, + {0x1220, "N5E025P2-PAGA", "25G"}, {0x122C, "N5E025P2-NAGA", "25G"}, + {0x1221, "N5S025P2-PAGA", "25G"}, {0x122D, "N5S025P2-NAGA", "25G"}, + {0x1A20, "N6E100P2-PAGA", "100G"}, {0x1A2C, "N6E100P2-NAGA", "100G"}, + {0x1A21, "N6S100P2-PAGA", "100G"}, {0x1A2D, "N6S100P2-NAGA", "100G"}, + {0x0221, "N6S100P2-NAGA", "100G"}, {0x0A21, "N6S100P2-PDGA", "100G"} }; + +static char *my_strtok(char *p_in_string, char *p_in_delimit, char **pp_out_ret) +{ + static char *p_tmp; + char *p_strstr = NULL; + char *ret = NULL; + int for_index; + + if (!pp_out_ret) + return NULL; + + *pp_out_ret = NULL; + if (!p_in_delimit) + return p_in_string; + + if (p_in_string) + p_tmp = p_in_string; + + if (!p_tmp) + return NULL; + + ret = p_tmp; + p_strstr = strstr(p_tmp, p_in_delimit); + if (p_strstr) { + p_tmp = p_strstr + strlen(p_in_delimit); + for (for_index = 0; for_index < strlen(p_in_delimit); for_index++) + *(p_strstr + for_index) = '\0'; + } else { + p_tmp = NULL; + } + + *pp_out_ret = p_tmp; + + return ret; +} + +static int my_isdigit(char in_char) +{ + if ((in_char >= '0') && (in_char <= '9')) + return 1; + else + return 0; +} + +static int my_atoi(char *p_in_string) +{ + int flag = 1; + int ret = 0; + + while (my_isdigit(p_in_string[0]) == 0) + p_in_string++; + + if (*(p_in_string - 1) == '-') + flag = -1; + + while (my_isdigit(p_in_string[0]) != 0) { + ret *= 10; + ret += p_in_string[0] - '0'; + if (ret > INT_MAX || ret < INT_MIN) + return 0; + + p_in_string++; + } + + if (ret != 0) + return (flag * ret); + else + return 0; +} + +static struct dentry *ne6x_dbg_root; +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, 
u8 *len); + +static void ne6x_dbg_show_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "----RX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_alloc, + ring->next_to_use, ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "----TX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "----CQ: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +static void ne6x_dbg_show_ring(struct 
ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j, k, l; + union ne6x_rx_desc *rx_desc; + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, + rx_desc->wb.vp, rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_mop_addr) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, + tx_desc->chain, tx_desc->transmit_type, tx_desc->sop_valid, + tx_desc->eop_valid, tx_desc->tso, tx_desc->l3_csum, + tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, + "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", k, + ring->reg_idx, cq_desc->ctype, cq_desc->num); + for (l = 0; l < cq_desc->num; l++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, + "******[TX] %d:%d val:0x%x\n", l, + NE6X_CQ_TO_OFF_TX(cq_desc, l), + NE6X_CQ_TO_STS_TX(cq_desc, l)); + else + dev_info(&pf->pdev->dev, + "******[RX] %d:%d val:0x%x len:0x%x\n", l, + NE6X_CQ_TO_OFF_RX(cq_desc, l), + NE6X_CQ_TO_STS_RX(cq_desc, l), + NE6X_CQ_TO_LEN_RX(cq_desc, l)); + } + } + } + } +} + +static void ne6x_dbg_show_txtail(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j; + struct ne6x_adapter *adpt; + struct ne6x_ring *ring; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %llx packets\n", i, j, + readq(ring->tail + j)); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +static void ne6x_dbg_show_txq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + 
dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +static void ne6x_dbg_show_rxq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] RX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +static void ne6x_dbg_show_cq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] CQ queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +static void ne6x_dbg_clean_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + rx_ring = adpt->rx_rings[j]; + cq_ring = adpt->cq_rings[j]; + + memset(&tx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(struct ne6x_txq_stats)); + + memset(&rx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&rx_ring->rx_stats, 0, sizeof(struct ne6x_rxq_stats)); + + memset(&cq_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&cq_ring->cq_stats, 0, sizeof(struct ne6x_cq_stats)); + } + dev_info(&pf->pdev->dev, "---------------------------adpt[%d] all ring cleaned---------------------------------------", + i); + } +} + +static void ne6x_dbg_show_txring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------tx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = 
pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d, busy:%lld\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(tx_ring), + tx_ring->next_to_use, tx_ring->next_to_clean, + tx_ring->tx_stats.tx_busy); + } + } + dev_info(&pf->pdev->dev, "+----------------------------tx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +static void ne6x_dbg_show_rxring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *rx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------rx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + rx_ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], alloc:%04d, use:%04d, clean:%04d, cq_expect:%04d\n", + i, j, head, tail, oft, rx_ring->next_to_alloc, + rx_ring->next_to_use, rx_ring->next_to_clean, + rx_ring->cq_last_expect); + } + } + dev_info(&pf->pdev->dev, "+----------------------------rx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +static void ne6x_dbg_show_cqring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int queue_num = 0; + u64 head, tail; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------cq begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + cq_ring = adpt->cq_rings[j]; + queue_num 
= adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(cq_ring), cq_ring->next_to_use, + cq_ring->next_to_clean); + } + } + dev_info(&pf->pdev->dev, "+----------------------------cq end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +static void ne6x_dbg_show_txdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + + tx_ring = adpt->tx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-----------------------------------Netdev[%d] - Queue[%d] - tx_desc begin-----------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < tx_ring->count; i++) { + tx_desc = NE6X_TX_DESC(tx_ring, i); + if (!tx_desc->buffer_mop_addr && i != 0) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "tx_desc[%d]\n", i); + dev_info(&pf->pdev->dev, "struct ne6x_tx_desc\n" + "{\n" + " u8 flags : 8; [0x%x]\n" + " u8 vp : 7; [%d]\n" + " u8 event_trigger : 1; [%d]\n" + " u8 chain : 1; [%d]\n" + " u8 transmit_type : 2; [%d]\n" + " u8 sop_valid : 1; [%d]\n" + " u8 eop_valid : 1; [%d]\n" + " u8 tso : 1; [%d]\n" + " u8 l3_csum : 1; [%d]\n" + " u8 l3_ofst : 7; [%d]\n" + " u8 l4_csum : 1; [%d]\n" + " u8 l4_ofst : 7; [%d]\n" + " u8 pld_ofst; [%d]\n" + " __le64 mop_cnt : 24; [%d]\n" + " __le64 sop_cnt : 16; [%d]\n" + " __le64 mss : 16; [%d]\n" + " __le64 buffer_mop_addr; [%lld]\n" + " __le64 buffer_sop_addr; [%lld]\n" + "};\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, tx_desc->tso, + tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, tx_desc->mss, + tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + dev_info(&pf->pdev->dev, "+------------------------------------------------Netdev[%d] - Queue[%d] - tx_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +static void ne6x_dbg_show_rxdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + union ne6x_rx_desc *rx_desc = NULL; + struct ne6x_ring *rx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + 
dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + rx_ring = adpt->rx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%2d] - rx_desc begin-------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < rx_ring->count; i++) { + rx_desc = NE6X_RX_DESC(rx_ring, i); + + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], p[0x%02x%02x%02x%02x%02x%02x%02x%02x], pkt_len[%d]\n", + adpt_num, queue_num, i, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, rx_desc->wb.vp, + rx_desc->wb.pd[0], rx_desc->wb.pd[1], rx_desc->wb.pd[2], rx_desc->wb.pd[3], + rx_desc->wb.pd[4], rx_desc->wb.pd[5], rx_desc->wb.pd[6], rx_desc->wb.pd[7], + rx_desc->wb.pkt_len); + } + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%d] - rx_desc end----------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +static void ne6x_dbg_show_cqdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_ring *cq_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i, j; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + cq_ring = adpt->cq_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc begin------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < cq_ring->count; i++) { + cq_desc = NE6X_CQ_DESC(cq_ring, i); + + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], cq_desc[%d], vp[%d], ctype[%s], num[%d]\n", + adpt_num, queue_num, i, cq_ring->reg_idx, + cq_desc->ctype == 0 ? 
"tx" : "rx", + cq_desc->num); + for (j = 0; j < cq_desc->num; j++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, "******TX%d[%d]: val:0x%x\n", j, + NE6X_CQ_TO_OFF_TX(cq_desc, j), + NE6X_CQ_TO_STS_TX(cq_desc, j)); + else + dev_info(&pf->pdev->dev, "******RX%d[%d]: val:0x%x len:%d\n", j, + NE6X_CQ_TO_OFF_RX(cq_desc, j), + NE6X_CQ_TO_STS_RX(cq_desc, j), + NE6X_CQ_TO_LEN_RX(cq_desc, j)); + } + } + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +#ifdef CONFIG_RFS_ACCEL +static void ne6x_dbg_show_arfs_cnt(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 idx = 0; + struct ne6x_adapter *pf_adpt; + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = NULL; + + ne6x_for_each_pf(pf, idx) { + pf_adpt = pf->adpt[idx]; + fltr_cntrs = pf_adpt->arfs_fltr_cntrs; + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + dev_info(&pf->pdev->dev, "pf_num:%d totle_num:%d\n\t\t\t tcp_v4_num:%d\n\t\t\t udp_v4_num:%d\n\t\t\t tcp_v6_num:%d\n\t\t\t udp_v6_num:%d\n", + idx, (atomic_read(&fltr_cntrs->active_tcpv4_cnt) + + atomic_read(&fltr_cntrs->active_udpv4_cnt) + + atomic_read(&fltr_cntrs->active_tcpv6_cnt) + + atomic_read(&fltr_cntrs->active_udpv6_cnt)), + atomic_read(&fltr_cntrs->active_tcpv4_cnt), + atomic_read(&fltr_cntrs->active_udpv4_cnt), + atomic_read(&fltr_cntrs->active_tcpv6_cnt), + atomic_read(&fltr_cntrs->active_udpv6_cnt)); + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + } +} +#endif + +extern u32 ne6x_dev_crc32(const u8 *buf, u32 size); + +static void ne6x_dbg_apb_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "apb_read \n"); + return; + } + + offset = addr; + value = ne6x_reg_apb_read(pf, offset); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +static void ne6x_dbg_apb_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "apb_write \n"); + return; + } + + offset = addr; + ne6x_reg_apb_write(pf, offset, value); + dev_info(&pf->pdev->dev, "apb_write: 0x%llx = 0x%x\n", offset, value); +} + +static void ne6x_dbg_mem_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int index = 0, cnt; + u32 *reg_data; + u64 offset; + u32 addr; + u32 size; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &size); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "mem_read \n"); + return; + } + + reg_data = kzalloc((size + 4) * 4, GFP_KERNEL); + offset = addr; + for (index = 0x00; index < size; index++) + reg_data[index] = ne6x_reg_apb_read(pf, offset + index * 4); + + for (index = 0x00; index < size / 4; index++) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2], reg_data[4 * index + 3]); + + if ((size % 4) == 1) + dev_info(&pf->pdev->dev, "%lx: %08X\n", (unsigned int long)(offset + index * 16), + reg_data[4 * index]); + else if ((size % 4) == 2) + dev_info(&pf->pdev->dev, "%lx: %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1]); + else if ((size % 4) == 3) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X\n", + 
(unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2]); + + kfree((void *)reg_data); +} + +static void ne6x_dbg_mem_write(struct ne6x_pf *pf, char *cmd_buf, int count) {} + +static void ne6x_dbg_templ_help(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + dev_info(&pf->pdev->dev, "HW_FEATURES = 0\n"); + dev_info(&pf->pdev->dev, "HW_FLAGS = 1\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_SIZE = 2\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_ENTRY_WIDTH = 3\n"); + dev_info(&pf->pdev->dev, "RSS_HASH_KEY_BLOCK_SIZE = 4\n"); + dev_info(&pf->pdev->dev, "PORT2PI_0 = 5\n"); + dev_info(&pf->pdev->dev, "PI2PORT_0 = 25\n"); + dev_info(&pf->pdev->dev, "VLAN_TYPE = 33\n"); + dev_info(&pf->pdev->dev, "PI0_BROADCAST_LEAF = 37\n"); + dev_info(&pf->pdev->dev, "PORT_OLFLAGS_0 = 53\n"); + dev_info(&pf->pdev->dev, "PORT_2_COS_0 = 121\n"); + dev_info(&pf->pdev->dev, "VPORT0_LINK_STATUS = 155\n"); + dev_info(&pf->pdev->dev, "TSO_CKSUM_DISABLE = 156\n"); + dev_info(&pf->pdev->dev, "PORT0_MTU = 157\n"); + dev_info(&pf->pdev->dev, "PORT0_QINQ = 161\n"); + dev_info(&pf->pdev->dev, "CQ_SIZE = 229\n"); +} + +static void ne6x_dbg_templ_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &vport, &type); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "temp_read \n"); + return; + } + + ne6x_reg_get_user_data(pf, vport + type, &value); + dev_info(&pf->pdev->dev, "temp_read 0x%04X value 0x%08X\n", type, value); +} + +static void ne6x_dbg_templ_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i %i", &vport, &type, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "temp_write \n"); + return; + } + + ne6x_reg_set_user_data(pf, vport + type, value); + dev_info(&pf->pdev->dev, "temp_write: 0x%04x = 0x%x\n", type, value); +} + +static void ne6x_dbg_soc_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "soc_read \n"); + return; + } + + ne6x_reg_indirect_read(pf, addr, &value); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +static void ne6x_dbg_soc_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "soc_write \n"); + return; + } + + ne6x_reg_indirect_write(pf, addr, value); + dev_info(&pf->pdev->dev, "soc_write: 0x%08X = 0x%08X\n", addr, value); +} + +static void ne6x_dbg_tab_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[10] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 2) { + dev_warn(&pf->pdev->dev, "tab_read \n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = strtoul(p_str_array[0], 0, 16); + else + 
table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = strtoul(p_str_array[1], 0, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + table_info->size = table_size[table_info->table]; + ret = ne6x_reg_table_read(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success" : "timeout!"); + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +static void ne6x_dbg_set_mac_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + int ret; + int cnt; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:21:00:00:01\n"); + return; + } + + cnt = sscanf(&cmd_buf[2], "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX", &mac_addr[0], &mac_addr[1], + &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]); + if (cnt != 6) { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:24:00:00:01\n"); + return; + } + + if (port == 0) + memcpy(&psdk_spd_info->port_0_mac, &mac_addr, 6); + else if (port == 1) + memcpy(&psdk_spd_info->port_1_mac, &mac_addr, 6); + else if (port == 2) + memcpy(&psdk_spd_info->port_2_mac, &mac_addr, 6); + else if (port == 3) + memcpy(&psdk_spd_info->port_3_mac, &mac_addr, 6); + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(*psdk_spd_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(*psdk_spd_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "set mac success!" 
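/*
 * Editor's note (illustrative, not part of the patch): the debugfs writes that
 * touch the SPD EEPROM all follow the same pattern - patch the field, recompute
 * the trailing CRC over everything except the last 4 bytes, store it big-endian
 * in spd_verify_value, then write the whole struct back.  A minimal sketch of
 * the matching verification step (mirroring ne6x_dev_spd_verify() in
 * ne6x_dev.c later in this patch):
 *
 *	static bool spd_is_valid(const struct ne6x_dev_eeprom_info *spd)
 *	{
 *		u32 crc = ne6x_dev_crc32((const u8 *)spd, sizeof(*spd) - 4);
 *
 *		return be32_to_cpu(spd->spd_verify_value) == crc;
 *	}
 */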
: "set mac fail!"); +} + +static void ne6x_dbg_get_mac(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "get_port_mac P0/P1\n"); + dev_warn(&pf->pdev->dev, "example-- get_port_mac P0\n"); + return; + } + + if (port == 0) + memcpy(&mac_addr, &psdk_spd_info->port_0_mac, 6); + else if (port == 1) + memcpy(&mac_addr, &psdk_spd_info->port_1_mac, 6); + else if (port == 2) + memcpy(&mac_addr, &psdk_spd_info->port_2_mac, 6); + else if (port == 3) + memcpy(&mac_addr, &psdk_spd_info->port_3_mac, 6); + else + return; + + dev_info(&pf->pdev->dev, "port %d: mac = %02x:%02x:%02x:%02x:%02x:%02x\n", port, + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); +} + +static void ne6x_dbg_set_dev_type_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 *p_str_array[10] = {0}; + int array_index = 0, ret; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + u16 dev_type = 0; + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 1) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) { + dev_type = strtoul(p_str_array[0], 0, 16); + } else { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (dev_type != NE6000AI_2S_X16H_25G_N5 && dev_type != NE6000AI_2S_X16H_25G_N6) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + psdk_spd_info->product_mode = cpu_to_be16(dev_type); + psdk_spd_info->is_pcie_exist = 0x1; + + if (dev_type == NE6000AI_2S_X16H_25G_N5) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000800); + } else if (dev_type == NE6000AI_2S_X16H_25G_N6) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000100); + } else { + return; + } + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write eeprom mac success!" : "write eeprom mac fail!"); +} + +static void ne6x_dbg_tab_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 8) { + dev_info(&pf->pdev->dev, "tab_write
...\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = strtoul(p_str_array[0], 0, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = strtoul(p_str_array[1], 0, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 2); index++) { + if (!strncmp(p_str_array[index + 2], "0x", 2)) + table_info->data[index] = strtoul(p_str_array[index + 2], 0, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 2]); + + table_info->size++; + } + + table_info->size = table_size[table_info->table]; + + ret = ne6x_reg_table_write(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +static void ne6x_dbg_tab_insert(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u32 table_id = 0xffffffff; + u32 temp_table[130]; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + + if (!p_tmp_ret) + break; + } + + /* 1 + 16 + 1+++ */ + if (array_index < 24) { + dev_warn(&pf->pdev->dev, "tab_insert
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = strtoul(p_str_array[0], 0, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = strtoul(p_str_array[index + 1], 0, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, temp_table, + table_info->size); + if (ret == -ENOENT) { + table_info->size = 64 + table_size[table_info->table]; + ret = ne6x_reg_table_insert(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + &table_id); + } else { + dev_info(&pf->pdev->dev, "0x%x 0x%x 0x%x 0x%x table exist\n", table_info->data[0], + table_info->data[1], table_info->data[2], table_info->data[3]); + return; + } + if (ret == 0) + dev_info(&pf->pdev->dev, "insert rule_id = 0x%x\n", table_id); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : + ((ret != -ETIMEDOUT) ? "fail!" : "timeout!")); +} + +static void ne6x_dbg_tab_delete(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = strtoul(p_str_array[0], 0, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = strtoul(p_str_array[index + 1], 0, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_delete(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +static void ne6x_dbg_tab_search(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + dev_info(&pf->pdev->dev, "array_index = %d\n", array_index); + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = strtoul(p_str_array[0], 0, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = strtoul(p_str_array[index + 1], 0, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "success!" : ((ret == -ENOENT) ? "not fount!" : "timeout!")); + if (ret) + return; + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +static void ne6x_dbg_get_fru_info(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct file *fp = NULL; + u8 *buffer; + int para_count; + u32 size; + + para_count = sscanf(&cmd_buf[0], "%i", &size); + if (para_count != 1) { + dev_warn(&pf->pdev->dev, "fru_read \n"); + return; + } + + if (size > 512) { + dev_warn(&pf->pdev->dev, "size must less than 512\n."); + return; + } + + buffer = kzalloc((size + 4), GFP_KERNEL); + ne6x_dev_get_fru(pf, (u32 *)buffer, size); + + fp = filp_open("/opt/share/fru.bin", O_RDWR | O_CREAT, 0644); + if (!fp) { + dev_err(&pf->pdev->dev, "can't open /opt/share/fru.bin.\n"); + return; + } + + kernel_write(fp, (char *)buffer, size, &fp->f_pos); + filp_close(fp, NULL); +} + +static u32 getparam(char *cmd_buf, u32 *param, int max_cnt) +{ + int ret, i, j, tmp, tmp1, tmp2, flag = 0; + u32 count = 0, cnt = 0, cnt_t = 0; + char *p = &cmd_buf[0]; + char *char_offset; + u32 *offset; + + offset = kzalloc((max_cnt + 1) * sizeof(u32), GFP_ATOMIC); + char_offset = kzalloc((max_cnt + 1) * sizeof(char), GFP_ATOMIC); + /* count the number */ + for (i = 0; i < strlen(cmd_buf); i++) { + if (cmd_buf[i] == ',' || cmd_buf[i] == '-') { + count++; + if (cmd_buf[i] == ',') { + offset[count] = i + 1; + char_offset[count] = ','; + } else if (cmd_buf[i] == '-') { + offset[count] = i + 1; + char_offset[count] = '-'; + } + } + if (cmd_buf[i] == ' ') + break; + + if (count >= max_cnt) + break; + } + + for (i = 0; i <= count; i++) { + ret = sscanf(p, "%i", ¶m[i + cnt_t]); + if (ret == 1) { + cnt++; + if (char_offset[cnt] == '-') { + flag++; + p = &cmd_buf[offset[cnt]]; + ret = sscanf(p, "%i", ¶m[i + cnt_t + 1]); + tmp1 = param[i + cnt_t]; + tmp2 = param[i + cnt_t + 1]; + if (ret == 1) { + tmp = i + cnt_t; + for (j = 0; j <= tmp2 - tmp1; j++) + param[tmp + j] = tmp1 + j; + } + cnt_t += tmp2 - tmp1; + + cnt++; + } + p = &cmd_buf[offset[cnt]]; + } + } + + kfree(offset); + + 
return cnt + cnt_t - 2 * flag;
+}
+
+static void GetNextArg(char **Args, char *buffer)
+{
+	while (**Args == ' ' || **Args == '\t')
+		(*Args)++;
+
+	while (**Args != ' ' && **Args != '\t' && **Args != '\0')
+		*buffer++ = *(*Args)++;
+
+	*buffer = '\0';
+}
+
+static void ne6x_dbg_show_pcie_drop_counter(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	union ne6x_eth_recv_cnt eth_recv_cnt;
+	u64 __iomem *reg;
+
+	reg = (void __iomem *)pf->hw.hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ETH_RECV_CNT);
+	eth_recv_cnt.val = readq(reg);
+	dev_info(&pf->pdev->dev, "pcie drop cnt = %d\n", eth_recv_cnt.reg.csr_eth_pkt_drop_cnt +
+		 eth_recv_cnt.reg.csr_eth_rdq_drop_cnt);
+}
+
+static void ne6x_dbg_clr_table(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 table_id = 0, cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i", &table_id);
+	if (table_id == 6)
+		ne6x_reg_clear_table(pf, table_id);
+}
+
+static void ne6x_dbg_set_hw_flag_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info;
+	int flag = 0;
+	int ret;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i", &flag);
+	if (cnt != 1) {
+		dev_warn(&pf->pdev->dev, "\n0:none;1,ram white list;2,ddr white list\n");
+		return;
+	}
+
+	psdk_spd_info->hw_flag = cpu_to_be32(flag);
+	psdk_spd_info->spd_verify_value =
+		cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info,
+					   sizeof(struct ne6x_dev_eeprom_info) - 4));
+	ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info,
+				    sizeof(struct ne6x_dev_eeprom_info));
+	dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "set hw_flag success!"
+								  : "set hw_flag fail!");
+}
+
+static void ne6x_dbg_erase_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 offset;
+	u32 length;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length);
+	if (cnt != 2) {
+		dev_warn(&pf->pdev->dev, "norflash_erase \n");
+		return;
+	}
+
+	if (!ne6x_reg_erase_norflash(pf, offset, length))
+		return;
+
+	dev_err(&pf->pdev->dev, "norflash_erase fail.\n");
+}
+
+static void ne6x_dbg_write_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u8 *ptemp_data = NULL;
+	u32 offset = 0;
+	u32 length = 0;
+	u32 temp_data = 0;
+	u8 *ptemp = NULL;
+	int i = 0;
+
+	ptemp_data = kzalloc(1024, GFP_ATOMIC);
+
+	while ((ptemp = strsep(&cmd_buf, " "))) {
+		if (!strncmp(ptemp, "0x", 2))
+			temp_data = strtoul(ptemp, 0, 16);
+		else
+			temp_data = my_atoi(ptemp);
+
+		if (i == 0)
+			offset = temp_data;
+		else if (i == 1)
+			length = temp_data;
+		else
+			ptemp_data[i - 2] = (u8)temp_data;
+
+		i++;
+		if (i == 1026)
+			break;
+	}
+
+	if (length > 1024 || i < 2) {
+		dev_warn(&pf->pdev->dev, "norflash_write (byte split by space max 256)\n");
+		goto pdata_memfree;
+	}
+
+	if (!ne6x_reg_write_norflash(pf, offset, length, (u32 *)ptemp_data))
+		dev_info(&pf->pdev->dev, "write norflash success.\n");
+	else
+		dev_err(&pf->pdev->dev, "write norflash fail.\n");
+
+pdata_memfree:
+	kfree(ptemp_data);
+}
+
+static void ne6x_dbg_read_norflash(struct ne6x_pf *pf, char *cmd_buf, int count)
+{
+	u32 offset = 0;
+	u32 length = 0;
+	u32 buffer_len;
+	char *pdata = NULL;
+	int cnt;
+
+	cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length);
+	if (cnt != 2) {
+		dev_warn(&pf->pdev->dev, "norflash_read \n");
+		return;
+	}
+
+	buffer_len = length;
+	if (length % 4)
+		buffer_len = (length / 4 + 1) * 4;
+
+	pdata = kzalloc(buffer_len, GFP_ATOMIC);
+	if (!ne6x_reg_read_norflash(pf, offset, buffer_len, (u32 *)pdata))
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pdata, length);
+
else + dev_err(&pf->pdev->dev, "read_norflash fail.\n"); + + kfree(pdata); +} + +static void ne6x_dbg_meter_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + u32 cir, type_num, type_flag = 0; + u32 cir_maxnum = 0xfffff; + u32 cbs_maxnum = 0xffffff; + struct meter_table vf_bw; + char *p_tmp_ret; + int index, ret = 0; + int array_index = 0; + u8 *p_in_string = NULL; + u32 data[3] = {0}; + u32 type = 0; + + p_in_string = &cmd_buf[0]; + p_tmp_ret = NULL; + + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + if (!p_tmp_ret) + break; + } + if (array_index != 3) { + dev_warn(&pf->pdev->dev, "Incorrect input, please re-enter\n"); + return; + } + + for (index = 0; index < array_index; index++) { + if (!strncmp(p_str_array[index], "0x", 2)) + data[index] = strtoul(p_str_array[index], 0, 16); + else + data[index] = my_atoi(p_str_array[index]); + } + + type_num = data[0]; + switch (type_num) { + case 0: + type_flag |= NE6X_F_ACK_FLOOD; + break; + case 1: + type_flag |= NE6X_F_PUSH_ACK_FLOOD; + break; + case 2: + type_flag |= NE6X_F_SYN_ACK_FLOOD; + break; + case 3: + type_flag |= NE6X_F_FIN_FLOOD; + break; + case 4: + type_flag |= NE6X_F_RST_FLOOD; + break; + case 5: + type_flag |= NE6X_F_PUSH_SYN_ACK_FLOOD; + break; + case 6: + type_flag |= NE6X_F_UDP_FLOOD; + break; + case 7: + type_flag |= NE6X_F_ICMP_FLOOD; + break; + case 8: + type_flag |= NE6X_F_FRAGMENT_FLOOD; + break; + default: + dev_err(&pf->pdev->dev, "err_input,please enter one of'0-8'\n"); + return; + } + + if (data[1] == 1) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type |= type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else if (data[1] == 0) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type &= ~type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else { + dev_err(&pf->pdev->dev, "Input error, please enter '0' or '1'\n"); + return; + } + + cir = data[2] * 1000 + 1023; + cir = min((cir / 1024), cir_maxnum); + vf_bw.cir = cir; + vf_bw.pir = min((cir + cir / 10), cir_maxnum); + + vf_bw.cbs = min((vf_bw.cir * 10000), cbs_maxnum); + vf_bw.pbs = min((vf_bw.pir * 10000), cbs_maxnum); + ret = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | type_num, + (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write meter success!" 
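/*
 * Editor's note (illustrative): the last meter_write parameter is a rate whose
 * unit is not spelled out in the patch; the conversion above amounts to
 *
 *	cir = min((rate * 1000 + 1023) / 1024, 0xfffff);   // round up, 20-bit cap
 *	pir = min(cir + cir / 10, 0xfffff);                // +10% headroom
 *	cbs = min(cir * 10000, 0xffffff);                  // burst sizes, 24-bit cap
 *	pbs = min(pir * 10000, 0xffffff);
 *
 * e.g. rate = 1000 gives cir = 977, pir = 1074, cbs = 9770000, pbs = 10740000.
 */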
: "write meter fail!"); +} + +const struct ne6x_dbg_cmd_wr deg_cmd_wr[] = { + {"queue", ne6x_dbg_show_queue}, + {"ring", ne6x_dbg_show_ring}, + {"txq", ne6x_dbg_show_txq}, + {"rxq", ne6x_dbg_show_rxq}, + {"cq", ne6x_dbg_show_cq}, + {"clean", ne6x_dbg_clean_queue}, + {"txtail", ne6x_dbg_show_txtail}, + {"txr", ne6x_dbg_show_txring}, + {"rxr", ne6x_dbg_show_rxring}, + {"cqr", ne6x_dbg_show_cqring}, +#ifdef CONFIG_RFS_ACCEL + {"arfs", ne6x_dbg_show_arfs_cnt}, +#endif + {"apb_read", ne6x_dbg_apb_read}, + {"apb_write", ne6x_dbg_apb_write}, + {"mem_read", ne6x_dbg_mem_read}, + {"mem_write", ne6x_dbg_mem_write}, + {"soc_read", ne6x_dbg_soc_read}, + {"soc_write", ne6x_dbg_soc_write}, + {"templ_help", ne6x_dbg_templ_help}, + {"templ_read", ne6x_dbg_templ_read}, + {"templ_write", ne6x_dbg_templ_write}, + {"tab_read", ne6x_dbg_tab_read}, + {"tab_write", ne6x_dbg_tab_write}, + {"tab_insert", ne6x_dbg_tab_insert}, + {"tab_delete", ne6x_dbg_tab_delete}, + {"tab_search", ne6x_dbg_tab_search}, + {"set_port_mac", ne6x_dbg_set_mac_to_eeprom}, + {"get_port_mac", ne6x_dbg_get_mac}, + {"fru_read", ne6x_dbg_get_fru_info}, + {"pcie_dropcnt", ne6x_dbg_show_pcie_drop_counter}, + {"clear_table", ne6x_dbg_clr_table}, + {"set_hw_flag", ne6x_dbg_set_hw_flag_eeprom}, + {"norflash_erase", ne6x_dbg_erase_norflash}, + {"norflash_write", ne6x_dbg_write_norflash}, + {"norflash_read", ne6x_dbg_read_norflash}, + {"meter_write", ne6x_dbg_meter_write}, +}; + +/** + * ne6x_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +static ssize_t ne6x_dbg_info_pnsn_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + u8 *pru_name = NULL, *pru_pn = NULL, *pru_sn = NULL; + char name_pre[INFO_COL] = {0}; + char name_aft[INFO_COL] = {0}; + struct ne6x_pf *pf = NULL; + u32 buf_size = 500; + char *name = NULL; + ssize_t len = 0; + u8 *buffer_data; + u8 length = 0; + u16 device_id; + int erro = 0; + int dex = 0; + int i = 0; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + name = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!name) + return -ENOMEM; + + buffer_data = kzalloc(buf_size, GFP_KERNEL); + if (!buffer_data) { + kfree(name); + return -ENOMEM; + } + + pf = filp->private_data; + ne6x_dev_get_fru(pf, (u32 *)buffer_data, buf_size); + + pru_name = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_NAME, &length); + if (!pru_name) { + dev_err(&pf->pdev->dev, "get pru_name info erro"); + device_id = pf->hw.subsystem_device_id; + if (!device_id) { + dev_err(&pf->pdev->dev, "subsystem_device_id is NULL!"); + erro = 1; + goto get_buffer_end; + } + + sprintf(name_pre, "Product Name: BeiZhongWangXin"); + sprintf(name_aft, "Ethernet Adapter"); + + for (i = 0; i < ARRAY_SIZE(ne6x_device_info); i++) { + if (device_id == ne6x_device_info[i].system_id) + dex = i; + } + + if (dex != -1) { + len = sprintf(name, "%s %s %s %s\n", name_pre, + ne6x_device_info[dex].system_name, + ne6x_device_info[dex].system_speed, name_aft); + } else { + dev_warn(&pf->pdev->dev, "subsystem_device_id not match"); + erro = 1; + goto get_buffer_end; + } + + } else { + len = sprintf(name, "Product Name: %s\n", pru_name); + } + + pru_pn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_PART_NUMBER, &length); + if (pru_pn) + len = sprintf(name, "%s[PN] Part number: %s\n", 
name, pru_pn); + + pru_sn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_SERIAL_NUMBER, &length); + if (pru_sn) + len = sprintf(name, "%s[SN] Serial number: %s\n", name, pru_sn); + + if (copy_to_user(buffer, name, len)) { + erro = 2; + goto get_buffer_end; + } + + if (!len) { + erro = 1; + goto get_buffer_end; + } + + *ppos = len; + goto get_buffer_end; + +get_buffer_end: + kfree(pru_pn); + kfree(pru_sn); + kfree(pru_name); + kfree(name); + kfree(buffer_data); + + if (erro == 1) + return 0; + else if (erro == 2) + return -EFAULT; + + return len; +} + +static bool ne6x_dbg_fru_checksum(const u8 *data, u32 len) +{ + u8 gl = 0; + u32 i; + + for (i = 0; i < len - 1; i++) + gl += data[i]; + + gl = ~gl + 1; + return gl == data[len - 1]; +} + +static int ne6x_dbg_fru_get_offset(u8 *buffer, enum fru_type type, u8 *offset) +{ + u8 hd[8] = {0}; + int i; + + for (i = 0; i < 8; i++) + hd[i] = buffer[i]; + + if (!(hd[0] & 0x1)) + return -2; + + if (!ne6x_dbg_fru_checksum(hd, 8)) + return -3; + + if (type < INTER_USE_AREA || type > MUILT_AREA) + return -4; + + *offset = hd[type + 1]; + + return 0; +} + +static u8 *ne6x_dbg_fru_6ascii28(const u8 *data, u8 *len) +{ + u8 len_bit_6, len_bit_8; + int i, i6, byte; + u8 *buf = NULL; + + len_bit_6 = data[0] & 0x3F; + len_bit_8 = FRU_6BIT_8BITLENGTH(len_bit_6); + buf = kzalloc(len_bit_8 + 1, GFP_ATOMIC); + + if (!buf) { + *len = 0; + return NULL; + } + + for (i = 0, i6 = 1; i6 <= len_bit_6 && i < len_bit_8 && data[i6]; i++) { + byte = (i - 1) % 4; + + switch (byte) { + case 0: + buf[i] = data[i6] & 0x3F; + break; + case 1: + buf[i] = (data[i6] >> 6) | (data[1 + i6] << 2); + i6++; + break; + case 2: + buf[i] = (data[i6] >> 4) | (data[1 + i6] << 4); + i6++; + break; + case 3: + buf[i] = data[i6++] >> 2; + break; + } + + buf[i] &= 0x3F; + buf[i] += ASCII628_BASE; + } + + *len = len_bit_8; + + return buf; +} + +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len) +{ + u8 hd[2] = {0}; + u8 *pt = NULL; + u8 ofst = 0; + u32 i = 0; + + if (!buffer) + return NULL; + + if (ne6x_dbg_fru_get_offset(buffer, PRODUCT_AREA, &ofst) != 0 || ofst == 0) { + *len = 0; + return NULL; + } + + ofst *= 8; + hd[0] = buffer[ofst]; + hd[1] = buffer[ofst + 1]; + if (!(hd[0] & 0x1) || hd[1] == 0) + return NULL; + + if (!ne6x_dbg_fru_checksum(&buffer[ofst], hd[1] * 8)) + return NULL; + + ofst += 3; + + for (i = 0; i < part; i++) + ofst += 1 + (buffer[ofst] & 0x3f); + + if (FRU_CHECK_6ASCII(buffer[ofst])) { + pt = ne6x_dbg_fru_6ascii28(&buffer[ofst], len); + } else { + *len = (buffer[ofst] & 0x3f); + pt = kzalloc(*len, GFP_ATOMIC); + if (!pt) + return NULL; + + memcpy(pt, &buffer[ofst + 1], *len); + } + + return pt; +} + +static void ne6x_dbg_update_adpt_speed(struct ne6x_adapter *adpt, u32 speed, u32 lane_mode) {} + +/** + * ne6x_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + struct ne6x_ring *tx_ring; + int bytes_not_copied; + struct ne6x_adapter *adpt; + int i, cnt = 0; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + 
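/*
 * Editor's note (illustrative): the FRU helpers above follow the usual IPMI
 * FRU layout - an area is valid when all of its bytes, checksum included, sum
 * to zero modulo 256, and "6-bit ASCII" fields pack four characters into every
 * three bytes, with ASCII628_BASE (presumably 0x20) added after unpacking.
 * Sketch of one 3-byte group b0 b1 b2:
 *
 *	c0 = 0x20 + (b0 & 0x3f);
 *	c1 = 0x20 + ((b0 >> 6) | ((b1 & 0x0f) << 2));
 *	c2 = 0x20 + ((b1 >> 4) | ((b2 & 0x03) << 4));
 *	c3 = 0x20 + (b2 >> 2);
 */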
bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "updtail", 7) == 0) { + int idx, vp, tail; + + cnt = sscanf(&cmd_buf[7], "%d %d %d", &idx, &vp, &tail); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "updtail \n"); + goto command_write_done; + } + adpt = pf->adpt[idx ? 1 : 0]; + tx_ring = adpt->tx_rings[vp & 0xF]; + ne6x_tail_update(tx_ring, tail); + dev_info(&pf->pdev->dev, "write: adpt = %d vp = 0x%x tail_ptr = %d\n", idx ? 1 : 0, + vp, tail); + } else if (strncmp(cmd_buf, "memrd", 5) == 0) { + u32 base_addr; + u32 offset_addr = 0; + u64 value; + int index, vp; + + cnt = sscanf(&cmd_buf[5], "%d", &vp); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "memrd \n"); + goto command_write_done; + } + + offset_addr = 0x0; + for (index = 0; index < 0x20; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + if (base_addr == 0x13F) { + offset_addr = 0x21; + for (index = 0x21; index < 0x24; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x39; + for (index = 0x39; index < 0x4E; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x80; + for (index = 0x80; index < 0x95; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0xA3; + for (index = 0xA3; index < 0xA5; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + } + } else if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + ne6x_reg_pci_write(pf, base_addr, offset_addr, value); + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "wr", 2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i %i", &offset, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rr \n"); + goto command_write_done; + } + ne6x_reg_indirect_write(pf, offset, value); + dev_info(&pf->pdev->dev, "wr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "rr", 
2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i", &offset); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_indirect_read(pf, offset, &value); + dev_info(&pf->pdev->dev, "rr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "txd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "txd \n"); + goto command_write_done; + } + + ne6x_dbg_show_txdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "rxd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rxd \n"); + goto command_write_done; + } + + ne6x_dbg_show_rxdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "cqd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "cqd \n"); + goto command_write_done; + } + + ne6x_dbg_show_cqdesc_states(adpt_num, quenue_num, pf); + } else { + for (i = 0; i < count; i++) { + if (cmd_buf[i] == ' ') { + cmd_buf[i] = '\0'; + cnt = i; + break; + } + if (cmd_buf[i] == '\0') { + cnt = i; + break; + } + } + + for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_cmd_wr[i].command, cnt) == 0) { + deg_cmd_wr[i].command_proc(pf, &cmd_buf[cnt + 1], count - cnt - 1); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_command_read, + .write = ne6x_dbg_command_write, +}; + +const struct ne6x_dbg_cmd_wr deg_netdev_ops_cmd_wr[] = {}; + +/** + * ne6x_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static const struct file_operations ne6x_dbg_info_pnsn_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_info_pnsn_read, +}; + +static const struct file_operations ne6x_dbg_info_tps_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_proc_tps_read, +}; + +static ssize_t ne6x_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return 0; +} + +/** + * ne6x_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = 
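/*
 * Editor's note (illustrative): commands written to the debugfs "command"
 * file are dispatched by matching the first word against deg_cmd_wr[].command
 * and handing the rest of the line to the handler; all output goes to the
 * kernel log.  A minimal userspace usage, assuming the debugfs directory is
 * named after ne6x_driver_name and the PF sits at PCI address 0000:01:00.0:
 *
 *	int fd = open("/sys/kernel/debug/ne6x/0000:01:00.0/command", O_WRONLY);
 *
 *	write(fd, "apb_read 0x20000000", 19);   // result appears in dmesg
 *	close(fd);
 */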
cmd_buf_tmp - cmd_buf + 1; + } + + for (i = 0; i < ARRAY_SIZE(deg_netdev_ops_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_netdev_ops_cmd_wr[i].command, count) == 0) { + deg_netdev_ops_cmd_wr[i].command_proc(pf, + &cmd_buf[sizeof(deg_netdev_ops_cmd_wr[i].command) + 1], + count - 1 - sizeof(deg_netdev_ops_cmd_wr[i].command)); + goto command_write_done; + } + } + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_netdev_ops_read, + .write = ne6x_dbg_netdev_ops_write, +}; + +/** + * ne6x_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6x_dbg_pf = debugfs_create_dir(name, ne6x_dbg_root); + if (!pf->ne6x_dbg_pf) + return; + + pf->ne6x_dbg_info_pf = debugfs_create_dir("info", pf->ne6x_dbg_pf); + if (!pf->ne6x_dbg_info_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6x_dbg_pf, pf, &ne6x_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->ne6x_dbg_pf, pf, + &ne6x_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("product_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_pnsn_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("power_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_tps_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + debugfs_remove_recursive(pf->ne6x_dbg_pf); +} + +/** + * ne6x_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + pf->ne6x_dbg_info_pf = NULL; + + debugfs_remove_recursive(pf->ne6x_dbg_pf); + pf->ne6x_dbg_pf = NULL; +} + +/** + * ne6x_dbg_init - start up debugfs for the driver + **/ +void ne6x_dbg_init(void) +{ + ne6x_dbg_root = debugfs_create_dir(ne6x_driver_name, NULL); + if (!ne6x_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * ne6x_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6x_dbg_exit(void) +{ + debugfs_remove_recursive(ne6x_dbg_root); + ne6x_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h new file mode 100644 index 00000000000000..2094e52f4b6d19 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEBUGFS_H +#define _NE6X_DEBUGFS_H + +struct ne6x_debug_table { + int table; + int index; + int size; + u32 data[128]; +}; + +#ifdef CONFIG_DEBUG_FS + +enum fru_product_part { + MANUFACTURER_NAME = 0, + PRODUCT_NAME, + PRODUCT_PART_NUMBER, /* pn */ + PRODUCT_VERSION, + PRODUCT_SERIAL_NUMBER, /* sn */ + PRODUCT_ASSET_TAG, + PRODUCT_FRU_FILE_ID, +}; + +enum fru_type { + INTER_USE_AREA = 0, + CHASSIS_AREA, + BOARD_AREA, + PRODUCT_AREA, + MUILT_AREA, +}; + +#define NE6X_DEBUG_CHAR_LEN 1024 + +#define INFO_ROW 20 +#define INFO_COL 50 + +extern char ne6x_driver_name[]; + +struct ne6x_dbg_cmd_wr { + char command[NE6X_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6x_pf *pf, char *cmd_buf, int count); +}; + +struct ne6x_debug_info { + u16 system_id; + char system_name[INFO_COL]; + char system_speed[INFO_COL]; +}; + +void ne6x_dbg_init(void); +void ne6x_dbg_exit(void); + +void ne6x_dbg_pf_init(struct ne6x_pf *pf); +void ne6x_dbg_pf_exit(struct ne6x_pf *pf); +#else /* !CONFIG_DEBUG_FS */ + +static inline void ne6x_dbg_init(void) +{ } +static inline void ne6x_dbg_exit(void) +{ } +static inline void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ } +static inline void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ } +#endif /* end CONFIG_DEBUG_FS */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c new file mode 100644 index 00000000000000..fc8ae8de80dd7d --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c @@ -0,0 +1,1558 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "reg.h" + +#define NE6X_SDK_CRC32_DATA_LEN 256 + +#define NE6X_PPORT_BY_HWINFO(HWINFO, index) (((HWINFO) >> (8 * (index))) & 0xff) + +#define to_be32_vector(s, e, p) \ +({ \ + int __n; \ + u32 *__data = (u32 *)(p);\ + for (__n = (s); __n < (e); __n++) \ + __data[__n] = cpu_to_be32(__data[__n]); \ +}) + +static void ext_toeplitz_key(const unsigned char *key, unsigned char *ext_key) +{ + int i; + + for (i = 0; i < 39; i++) { + ext_key[i] = key[i]; + ext_key[44 + i] = (key[i] << 1) | (key[i + 1] >> 7); + ext_key[44 * 2 + i] = (key[i] << 2) | (key[i + 1] >> 6); + ext_key[44 * 3 + i] = (key[i] << 3) | (key[i + 1] >> 5); + ext_key[44 * 4 + i] = (key[i] << 4) | (key[i + 1] >> 4); + ext_key[44 * 5 + i] = (key[i] << 5) | (key[i + 1] >> 3); + ext_key[44 * 6 + i] = (key[i] << 6) | (key[i + 1] >> 2); + ext_key[44 * 7 + i] = (key[i] << 7) | (key[i + 1] >> 1); + } + + ext_key[39] = key[39]; + ext_key[44 + 39] = (key[39] << 1) | (key[1] >> 7); + ext_key[44 * 2 + 39] = (key[39] << 2) | (key[1] >> 6); + ext_key[44 * 3 + 39] = (key[39] << 3) | (key[1] >> 5); + ext_key[44 * 4 + 39] = (key[39] << 4) | (key[1] >> 4); + ext_key[44 * 5 + 39] = (key[39] << 5) | (key[1] >> 3); + ext_key[44 * 6 + 39] = (key[39] << 6) | (key[1] >> 2); + ext_key[44 * 7 + 39] = (key[39] << 7) | (key[1] >> 1); + + for (i = 0; i < 4; i++) { + ext_key[40 + i] = ext_key[i]; + ext_key[44 + 40 + i] = ext_key[44 + i]; + ext_key[44 * 2 + 40 + i] = ext_key[44 * 2 + i]; + ext_key[44 * 3 + 40 + i] = ext_key[44 * 3 + i]; + ext_key[44 * 4 + 40 + i] = ext_key[44 * 4 + i]; + ext_key[44 * 5 + 40 + i] = ext_key[44 * 5 + i]; + ext_key[44 * 6 + 40 + i] = ext_key[44 * 6 + i]; + ext_key[44 * 7 + 40 + i] = ext_key[44 * 7 + i]; + } +} + +static u32 ne6x_dev_bitrev(u32 input, int bw) +{ + u32 var = 0; + int i; + + for (i = 0; i < bw; i++) { + if 
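/*
 * Editor's note (illustrative): ext_toeplitz_key() above stores eight
 * pre-shifted copies of the 40-byte RSS key (padded to 44 bytes by wrapping
 * the leading bytes), i.e. for shift k = 0..7
 *
 *	ext_key[44 * k + i] = (key[i] << k) | (key[i + 1] >> (8 - k));
 *
 * presumably so the Toeplitz hash engine can fetch the key window for any bit
 * offset with a byte-aligned load instead of shifting at lookup time.
 */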
(input & 0x01) + var |= 1 << (bw - 1 - i); + + input >>= 1; + } + + return var; +} + +static void ne6x_dev_crc32_init(u32 poly, u32 *table) +{ + u32 c; + int i, j; + + poly = ne6x_dev_bitrev(poly, 32); + + for (i = 0; i < NE6X_SDK_CRC32_DATA_LEN; i++) { + c = i; + for (j = 0; j < 8; j++) { + if (c & 1) + c = poly ^ (c >> 1); + else + c = c >> 1; + } + table[i] = c; + } +} + +u32 ne6x_dev_crc32(const u8 *buf, u32 size) +{ + u32 ne6x_sdk_crc32tab[NE6X_SDK_CRC32_DATA_LEN]; + u32 i, crc; + + ne6x_dev_crc32_init(0x4C11DB7, ne6x_sdk_crc32tab); + crc = 0xFFFFFFFF; + + for (i = 0; i < size; i++) + crc = ne6x_sdk_crc32tab[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + + return crc ^ 0xFFFFFFFF; +} + +static int ne6x_dev_spd_verify(struct ne6x_dev_eeprom_info *spd_info) +{ + if (be32_to_cpu(spd_info->spd_verify_value) == + ne6x_dev_crc32((const u8 *)spd_info, sizeof(*spd_info) - 4)) + return 0; + + return -EINVAL; +} + +static int ne6x_dev_get_eeprom(struct ne6x_pf *pf) +{ + int retry = 3; + + while (retry-- > 0) { + ne6x_reg_e2prom_read(pf, 0x0, (u8 *)&pf->sdk_spd_info, sizeof(pf->sdk_spd_info)); + if (!ne6x_dev_spd_verify(&pf->sdk_spd_info)) + return 0; + } + + memset(&pf->sdk_spd_info, 0, sizeof(pf->sdk_spd_info)); + + return -EINVAL; +} + +static int ne6x_dev_get_dev_info(struct ne6x_pf *pf) +{ + int ret; + + ret = ne6x_dev_get_eeprom(pf); + if (!ret) { + pf->dev_type = be16_to_cpu(pf->sdk_spd_info.product_mode); + pf->hw_flag = be32_to_cpu(pf->sdk_spd_info.hw_flag); + if (!pf->hw_flag) + pf->hw_flag = 1; + } else { + dev_err(ne6x_pf_to_dev(pf), "get eeprom fail\n"); + } + + return ret; +} + +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + if (pf->hw_flag == 1 || pf->hw_flag == 2) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + dev_info(ne6x_pf_to_dev(pf), "hw not support white list func\n"); + return -ENOTSUPP; + } + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } + + return 0; +} + +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + if (data & NE6X_F_TRUST_VLAN_ENABLED) + return true; + return false; +} + +int ne6x_dev_get_pport(struct ne6x_adapter *adpt) +{ + u32 lport_to_phy; + + if (!adpt) + return 0; + + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + return adpt->idx; + default: + break; + } + + lport_to_phy = adpt->back->sdk_spd_info.logic_port_to_phyical; + + return 
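/*
 * Editor's note (illustrative): ne6x_dev_crc32() is a table-driven, reflected
 * CRC-32 (polynomial 0x04C11DB7, init and final XOR of 0xFFFFFFFF), i.e. the
 * standard IEEE CRC-32, so it should produce the same result as the kernel's
 * own helper:
 *
 *	u32 crc = crc32_le(~0U, buf, size) ^ ~0U;   // same value as ne6x_dev_crc32()
 *
 * the open-coded table presumably just avoids a dependency on CONFIG_CRC32.
 */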
NE6X_PPORT_BY_HWINFO(be32_to_cpu(lport_to_phy), adpt->idx);
+}
+
+static void ne6x_dev_set_roce_icrc_offload(struct ne6x_pf *pf, bool enable)
+{
+	u32 data;
+
+	if (enable) {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data |= NE6X_F_S_ROCE_ICRC_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	} else {
+		ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data);
+		data &= ~NE6X_F_S_ROCE_ICRC_ENABLED;
+		ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data);
+	}
+}
+
+int ne6x_dev_init(struct ne6x_pf *pf)
+{
+	if (unlikely(ne6x_dev_get_dev_info(pf)))
+		return -EINVAL;
+
+	ne6x_reg_get_ver(pf, &pf->verinfo);
+	ne6x_dev_clear_vport(pf);
+	ne6x_dev_set_fast_mode(pf, false, 0);
+	ne6x_dev_set_roce_icrc_offload(pf, true);
+
+	return 0;
+}
+
+int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac)
+{
+	struct ne6x_dev_eeprom_info *info = &adpt->back->sdk_spd_info;
+
+	memset(mac, 0, 6);
+	switch (adpt->idx) {
+	case 0:
+		ether_addr_copy(mac, &info->port_0_mac[0]);
+		break;
+	case 1:
+		ether_addr_copy(mac, &info->port_1_mac[0]);
+		break;
+	case 2:
+		ether_addr_copy(mac, &info->port_2_mac[0]);
+		break;
+	case 3:
+		ether_addr_copy(mac, &info->port_3_mac[0]);
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+int ne6x_dev_get_port_num(struct ne6x_pf *pf)
+{
+	return pf->sdk_spd_info.number_of_physical_controllers;
+}
+
+int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_TEMPERATURE, (u32 *)temp, sizeof(*temp));
+}
+
+int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_POWER_CONSUM, (u32 *)power, sizeof(*power));
+}
+
+int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_I2C3_TEST, (u32 *)id, sizeof(u32));
+}
+
+int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_FRU, buffer, size);
+}
+
+int ne6x_dev_start_ddr_test(struct ne6x_pf *pf)
+{
+	return ne6x_reg_get_soc_info(pf, NE6X_SOC_DDR_TEST, NULL, 0);
+}
+
+int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size)
+{
+	return ne6x_reg_e2prom_read(adpt->back, offset, pbuf, size);
+}
+
+int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size)
+{
+	return ne6x_reg_e2prom_write(adpt->back, offset, pbuf, size);
+}
+
+int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status)
+{
+	u32 link_speed = ne6x_reg_apb_read(adpt->back, 0x2087FB00 + 4 * ADPT_LPORT(adpt));
+
+	status->link = link_speed >> 16;
+	status->speed = link_speed & 0xffff;
+
+	return 0;
+}
+
+int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status)
+{
+	u32 sfp_state;
+
+	sfp_state = ne6x_reg_apb_read(adpt->back, 0x2087FB40 + 4 * ADPT_LPORT(adpt));
+	*status = sfp_state & 0x1;
+
+	return 0;
+}
+
+int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LINK_STATUS, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)verify, sizeof(int));
+}
+
+int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt)
+{
+	return ne6x_reg_reset_firmware(adpt->back);
+}
+
+static int ne6x_dev_get_speed(struct ne6x_adapter *adpt, u32 *speed)
+{
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_GET,
+				  ADPT_LPORT(adpt), (void *)speed, sizeof(u32));
+}
+
+int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed)
+{
+	return
ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(fctrl)); +} + +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(*fctrl)); +} + +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&adpt->stats, sizeof(adpt->stats)); +} + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu) +{ + u32 max_length = mtu + 18; + + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); +} + +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu) +{ + u32 max_length; + int ret; + + ret = ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); + *mtu = max_length - 18; + + return ret; +} + +static int fastlog2(int x) +{ + int idx; + + for (idx = 31; idx >= 0; idx--) { + if (x & (1 << idx)) + break; + } + + return idx; +} + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *cfg) +{ + struct rss_table rss; + u32 *rss_data = (u32 *)&rss; + int ret, i; + + memset(&rss, 0x00, sizeof(rss)); + rss.flag = cpu_to_be32(0x01); /* valid bit */ + rss.hash_fun = (cfg->hash_func << 24) & 0xFF000000; + rss.hash_fun |= (cfg->hash_type & 0xFFFFFF); + rss.hash_fun = cpu_to_be32(rss.hash_fun); + rss.queue_base = cpu_to_be32(ADPT_VPORTCOS(adpt)); + rss.queue_def = cpu_to_be16(0x0); + rss.queue_size = cpu_to_be16(adpt->num_queue); + rss.entry_num = fastlog2(cfg->ind_table_size); + rss.entry_num = cpu_to_be16(rss.entry_num); + rss.entry_size = cpu_to_be16(0x0); + + for (i = 0; i < cfg->ind_table_size; i++) + rss.entry_data[i] = cfg->ind_table[i]; + + ext_toeplitz_key(&cfg->hash_key[0], &rss.hash_key[0]); + + for (i = 0; i < 128; i++) + rss_data[i] = cpu_to_be32(rss_data[i]); + + ret = ne6x_reg_table_write(adpt->back, NE6X_REG_RSS_TABLE, ADPT_VPORT(adpt), + (void *)&rss, sizeof(rss)); + return ret; +} + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags) +{ + int ret; + + clear_bit(NE6X_LINK_POOLING, adpt->back->state); + ret = ne6x_reg_upgrade_firmware(adpt->back, region, data, size); + set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return ret; +} + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_TYPE_LEN, NE6X_TALK_GET, + ADPT_LPORT(adpt), sfp_mode, sizeof(*sfp_mode)); +} + +int ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags) +{ + return ne6x_reg_get_sfp_eeprom(adpt->back, ADPT_LPORT(adpt), data, offset, size); +} + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_SET, + ADPT_LPORT(adpt), NULL, 0); +} + +/* panel port mapped to logical port */ +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt) +{ + u32 val = (ADPT_LPORT(adpt) << 24) | (ADPT_VPORT(adpt) << 16) | + (adpt->port_info->hw_queue_base + 160); + + 
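/*
 * Editor's note (illustrative): in ne6x_dev_set_rss() above, the hardware
 * takes the indirection table size as a power-of-two exponent, hence
 * entry_num = fastlog2(ind_table_size), e.g.
 *
 *	rss.entry_num = cpu_to_be16(fastlog2(128));   // 128-entry table -> 7
 *
 * while queue_base/queue_size describe the adapter's hardware queue window;
 * the whole table (128 32-bit words) is converted to big endian before
 * ne6x_reg_table_write().
 */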
ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT2PI_0 + ADPT_PPORT(adpt)), val); +} + +/* logical port mapped to panel port */ +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt) +{ + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI2PORT_0 + ADPT_LPORT(adpt)), + ADPT_PPORT(adpt)); +} + +/* clear vport map */ +void ne6x_dev_clear_vport(struct ne6x_pf *pf) +{ + int idx; + + for (idx = 0; idx < 32; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_2_COS_0 + idx), 0); + + for (idx = 0; idx < 64; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_OLFLAGS_0 + idx), 0); +} + +/* automatically generating vp_base_cos */ +int ne6x_dev_set_vport(struct ne6x_adapter *adpt) +{ + u16 port = adpt->vport >> 1; + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), &val); + + /* pf base cos */ + if (adpt->vport & 0x1) { + val &= 0xFFFF; + val |= ((adpt->port_info->hw_queue_base + 160) << 16); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } else { + val &= 0xFFFF0000; + val |= (adpt->port_info->hw_queue_base + 160); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } + + return 0; +} + +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + int ret; + + PBMP_CLEAR(new_pbmp); + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void *)new_pbmp, + sizeof(pbmp_t)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + return ret; +} + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + + PBMP_CLEAR(new_pbmp); + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void *)new_pbmp, sizeof(pbmp_t)); +} + +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + if (vlan->tpid == ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp_t)); + memset(new_pbmp, 0, sizeof(pbmp_t)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_ADD(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + if (vlan->tpid == 
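/*
 * Editor's note (illustrative): the VLAN table stores the 128-bit member-port
 * bitmap with its 32-bit words in the reverse order from the driver's pbmp_t,
 * so every table read/write above mirrors word 0<->3 and 1<->2.  A hedged
 * helper expressing the same swap:
 *
 *	static void pbmp_swap(pbmp_t dst, pbmp_t src)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			PBMP_DWORD_GET(dst, i) = PBMP_DWORD_GET(src, 3 - i);
 *	}
 */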
ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp)); + memset(new_pbmp, 0, sizeof(pbmp)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_REMOVE(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +/* clear vlan table */ +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf) +{ + pbmp_t pbmp; + int index; + + PBMP_CLEAR(pbmp); + for (index = 0; index < 8192; index++) + ne6x_reg_table_write(pf, NE6X_REG_VLAN_TABLE, index, (void *)pbmp, sizeof(pbmp)); + + return 0; +} + +/* port add qinq */ +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + struct ne6x_vf_vlan vlan; + u32 val = 0; + + memset(&vlan, 0, sizeof(vlan)); + + vlan.tpid = proto; + vlan.vid = vid; + + memcpy(&val, &vlan, sizeof(u32)); + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), val); + + return 0; +} + +/* port del qinq */ +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), 0); + + return 0; +} + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_PROMISC; + else + val &= ~NE6X_F_PROMISC; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_RX_ALLMULTI; + else + val &= ~NE6X_F_RX_ALLMULTI; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +static void ne6x_dev_update_uc_leaf(struct l2fdb_dest_unicast *unicast, struct ne6x_adapter *adpt, + bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? 
SET_BIT(unicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(unicast->vp_bmp[vport / 32], vport % 32); + + unicast->cnt = 0; +} + +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_fast_table db; + + memcpy(&db.mac[0], mac, 6); + db.start_cos = ADPT_VPORTCOS(adpt); + db.cos_num = adpt->num_queue; + + to_be32_vector(0, sizeof(db) / 4, &db); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + if (adpt->back->is_fastmode) + ne6x_dev_add_unicast_for_fastmode(adpt, mac); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 16, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + db.vlanid = 0; + + memset(&db.fw_info.unicast, 0, sizeof(db.fw_info.unicast)); + db.fw_info.unicast.flags = 0x1; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert unicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +static int ne6x_dev_del_unicast_for_fastmode(struct ne6x_adapter *adpt) +{ + struct l2fdb_fast_table db; + + memset(&db, 0, sizeof(db)); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret = 0; + + if (adpt->back->is_fastmode) + ne6x_dev_del_unicast_for_fastmode(adpt); + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return 0; +} + +static 
void ne6x_dev_update_mc_leaf(struct l2fdb_dest_multicast *multicast, + struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(multicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(multicast->vp_bmp[vport / 32], vport % 32); +} + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + + memset(&db.fw_info.multicast, 0, sizeof(db.fw_info.multicast)); + db.fw_info.multicast.flags = 0x3; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert multicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret; + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + /* hash_key */ + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + /* mac info */ + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return ret; +} + +inline void ne6x_dev_update_boradcast_leaf(u32 *leaf, struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? 
SET_BIT(*leaf, vport % 32) : CLR_BIT(*leaf, vport % 32); +} + +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, true); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, false); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt) +{ + int val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + return val; +} + +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 val) +{ + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (enable) + val |= NE6X_F_RSS; + else + val &= ~NE6X_F_RSS; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&fec, sizeof(int)); +} + +static int ne6x_dev_set_mac_inloop(struct ne6x_adapter *adpt, int enable) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LOOPBACK, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&enable, sizeof(int)); +} + +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fec, sizeof(int)); +} + +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&state, sizeof(u32)); +} + +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)state, sizeof(u32)); +} + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_stop(pf, flag); +} + +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_start(pf, flag); +} + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state) +{ + return ne6x_reg_set_led(adpt->back, ADPT_LPORT(adpt), state); +} + +static void ne6x_dev_transform_vf_stat_format(u32 *stat_arr, struct vf_stat *stat) +{ + u32 start_pos = 0; + + stat->rx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 
1]; + start_pos += 2; + stat->rx_drop_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 16; + stat->tx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; +} + +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat) +{ + u32 stat_arr[64]; + int ret; + + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); + ne6x_dev_transform_vf_stat_format(stat_arr, stat); + + return ret; +} + +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt) +{ + u32 stat_arr[64] = {0}; + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); +} + +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed) +{ + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + case NE6000AI_2S_X16H_25G_N6: + if (speed == SPEED_25000 || speed == SPEED_10000) + return 0; + + return -EOPNOTSUPP; + case NE6000AI_2S_X16H_100G_N5: + if (speed == SPEED_40000 || speed == SPEED_100000) + return 0; + + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (state) + val |= NE6X_F_RX_FW_LLDP; + else + val &= ~NE6X_F_RX_FW_LLDP; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +#define NE6X_METER_STEP 152 +#define NE6X_DF_METER_CBS_PBS (100 * 152) +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate) +{ + u32 val = 0, ret = 0; + u32 cir = 0, cbs = 0; + struct meter_table vf_bw; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + memset(&vf_bw, 0, sizeof(struct meter_table)); + + if (tx_rate) + val |= NE6X_F_TX_QOSBANDWIDTH; + else + val &= ~NE6X_F_TX_QOSBANDWIDTH; + + if (tx_rate) { + cir = tx_rate; + cbs = 0xffffff; + vf_bw.pbs = cbs; + vf_bw.cir = cir; + vf_bw.cbs = cbs; + vf_bw.pir = cir; + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + } else { + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + } + + return ret; +} + +static int ne6x_dev_reg_pattern_test(struct ne6x_pf *pf, u32 reg, u32 val_arg) +{ + struct device *dev; + u32 val, orig_val; + + orig_val = ne6x_reg_apb_read(pf, reg); + dev = ne6x_pf_to_dev(pf); + + ne6x_reg_apb_write(pf, reg, val_arg); + val = ne6x_reg_apb_read(pf, 
reg); + if (val != val_arg) { + dev_err(dev, "%s: reg pattern test failed - reg 0x%08x val 0x%08x\n", + __func__, reg, val); + return -1; + } + + ne6x_reg_apb_write(pf, reg, orig_val); + val = ne6x_reg_apb_read(pf, reg); + if (val != orig_val) { + dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return -1; + } + + return 0; +} + +#define NE6X_TEST_INT_SET_VALUE 0x1000000000000000 /* bit 60 */ +int ne6x_dev_test_intr(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + union ne6x_vp_int vp_int; + int ret = -1; + + if (base < NE6X_PF_VP0_NUM) { + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT), vp_int.val); + } + } else { + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT), vp_int.val); + } + } + + return ret; +} + +int ne6x_dev_test_reg(struct ne6x_adapter *adpt) +{ + struct ne6x_diag_reg_info test_reg[4] = { + {0x20a00180, 0x5A5A5A5A}, + {0x20a00180, 0xA5A5A5A5}, + {0x20a00188, 0x00000000}, + {0x20a0018c, 0xFFFFFFFF} + }; + u32 value, reg; + int index; + + netdev_dbg(adpt->netdev, "Register test\n"); + for (index = 0; index < ARRAY_SIZE(test_reg); ++index) { + value = test_reg[index].value; + reg = test_reg[index].address; + + /* bail on failure (non-zero return) */ + if (ne6x_dev_reg_pattern_test(adpt->back, reg, value)) + return 1; + } + + return 0; +} + +#define NE6X_LOOP_TEST_TYPE 0x1234 +/* handle hook packet */ +static int ne6x_dev_proto_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *ndev) +{ + struct ne6x_netdev_priv *np = netdev_priv(dev); + struct ne6x_adapter *adpt = np->adpt; + + netdev_info(dev, "recv loopback test packet success!\n"); + adpt->recv_done = true; + + kfree_skb(skb); + wake_up(&adpt->recv_notify); + + return 0; +} + +static u8 loop_dst_mac[8] = {0x00, 0x00, 0x00, 0x11, 0x11, 0x01}; +static int ne6x_dev_proto_send(struct net_device *netdev, char *buf, int len) +{ + struct sk_buff *skb; + u8 *pdata = NULL; + u32 skb_len; + + skb_len = LL_RESERVED_SPACE(netdev) + len; + skb = dev_alloc_skb(skb_len); + if (!skb) + return -1; + + skb_reserve(skb, LL_RESERVED_SPACE(netdev)); + skb->dev = netdev; + skb->ip_summed = CHECKSUM_NONE; + skb->priority = 0; + pdata = skb_put(skb, len); + if (pdata) + memcpy(pdata, buf, len); + + /* send loop test packet */ + if (dev_queue_xmit(skb) < 0) { + dev_put(netdev); + kfree_skb(skb); + netdev_err(netdev, "send pkt fail.\n"); + return -1; + } + netdev_info(netdev, "send loopback test packet success!\n"); + + return 0; +} + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt) +{ + struct packet_type prot_hook; + struct ethhdr *ether_hdr; + u32 old_value; + int ret = 0; + + adpt->send_buffer = kzalloc(2048, GFP_KERNEL); + if 
(!adpt->send_buffer) + return -ENOMEM; + + /* config mac/pcs loopback */ + if (ne6x_dev_set_mac_inloop(adpt, true)) { + netdev_err(adpt->netdev, "loopback test set_mac_inloop failed\n"); + kfree(adpt->send_buffer); + adpt->send_buffer = NULL; + return -1; + } + + old_value = ne6x_dev_get_features(adpt); + ne6x_dev_set_uc_promiscuous_enable(adpt, true); + memset(&prot_hook, 0, sizeof(struct packet_type)); + prot_hook.type = cpu_to_be16(NE6X_LOOP_TEST_TYPE); + prot_hook.dev = adpt->netdev; + prot_hook.func = ne6x_dev_proto_recv; + dev_add_pack(&prot_hook); + ether_hdr = (struct ethhdr *)adpt->send_buffer; + memcpy(ether_hdr->h_source, &adpt->port_info->mac.perm_addr[0], ETH_ALEN); + memcpy(ether_hdr->h_dest, loop_dst_mac, ETH_ALEN); + ether_hdr->h_proto = cpu_to_be16(NE6X_LOOP_TEST_TYPE); + adpt->send_buffer[14] = 0x45; + ne6x_dev_proto_send(adpt->netdev, adpt->send_buffer, 1024); + + if (wait_event_interruptible_timeout(adpt->recv_notify, !!adpt->recv_done, + msecs_to_jiffies(2000)) <= 0) { + netdev_info(adpt->netdev, "loopback test failed\n"); + ret = -1; + } + + adpt->recv_done = false; + kfree(adpt->send_buffer); + adpt->send_buffer = NULL; + /* restore promisc */ + ne6x_dev_set_features(adpt, old_value); + dev_remove_pack(&prot_hook); + if (ne6x_dev_set_mac_inloop(adpt, false)) { + netdev_err(adpt->netdev, "loopback test cancel_mac_inloop failed\n"); + return -1; + } + + return ret; +} + +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data) +{ + u8 mac_info[8]; + + memcpy(mac_info, data, 6); + + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_INFO, NE6X_TALK_SET, ADPT_LPORT(adpt), + (void *)data, sizeof(mac_info)); +} + +static u32 crc_table[CRC32_TABLE_SIZE]; /* 1KB */ +static void ne6x_dev_crc32_for_fw_init(void) +{ + u32 remainder; + u32 dividend; + s32 bit; + + for (dividend = 0U; dividend < CRC32_TABLE_SIZE; ++dividend) { + remainder = dividend; + for (bit = 8; bit > 0; --bit) { + if ((remainder & 1U) != 0) + remainder = (remainder >> 1) ^ CRC32_REVERSED_POLYNOMIAL; + else + remainder >>= 1; + } + + crc_table[dividend] = remainder; + } +} + +static u32 ne6x_dev_crc32_for_fw(const void *message, u32 bytes) +{ + const u8 *buffer = (const u8 *)message; + u32 remainder = CRC32_INITIAL_REMAINDER; + u8 idx; + + ne6x_dev_crc32_for_fw_init(); + + while (bytes-- > 0) { + idx = (u8)(*buffer++ ^ remainder); + remainder = crc_table[idx] ^ (remainder >> 8); + } + + return remainder ^ CRC32_FINALIZE_REMAINDER; +} + +static int ne6x_dev_get_fw_region(const u8 *data, u32 size, int *region) +{ + if (size < NE6X_FW_SIG_LENGTH) + return NE6X_FW_NOT_SUPPORT; + + if (!memcmp(data, NE6X_FW_810_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_810_APP; + return 0; + } else if (!memcmp(data, NE6X_FW_NP_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_NP; + return 0; + } else if (!memcmp(data, NE6X_FW_PXE_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_PXE; + return 0; + } else if (!memcmp(data, NE6X_FW_810_LDR_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_810_LOADER; + return 0; + } else if (!memcmp(data, NE6X_FW_FRU_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FRU; + return 0; + } else if (!memcmp(data, NE6X_FW_807_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_807_APP; + return 0; + } else { + return NE6X_FW_NOT_SUPPORT; + } +} + +static int ne6x_dev_check_fw(const u8 *data, const u32 size, const int region) +{ + struct ne6x_fw_common_header *comm_hdr; + struct ne6x_fw_np_header *np_hdr; + u32 hcrc, pcrc, crc; + + switch (region) { + case NE6X_ETHTOOL_FLASH_810_APP: + case 
NE6X_ETHTOOL_FLASH_PXE: + case NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FLASH_807_APP: + comm_hdr = (struct ne6x_fw_common_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = comm_hdr->header_crc; + pcrc = comm_hdr->package_crc; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*comm_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (comm_hdr->length != size) + return NE6X_FW_LENGTH_ERR; + + comm_hdr->package_crc = CRC32_INITIAL_REMAINDER; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, comm_hdr->length); + comm_hdr->package_crc = pcrc; + comm_hdr->header_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + case NE6X_ETHTOOL_FLASH_NP: + np_hdr = (struct ne6x_fw_np_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = np_hdr->hdr_crc; + pcrc = np_hdr->pkg_crc; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*np_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (np_hdr->img_length != size) + return NE6X_FW_LENGTH_ERR; + + np_hdr->pkg_crc = CRC32_INITIAL_REMAINDER; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, np_hdr->img_length); + np_hdr->pkg_crc = pcrc; + np_hdr->hdr_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + } + + return 0; +} + +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region) +{ + if (ne6x_dev_get_fw_region(data, size, region)) + return NE6X_FW_NOT_SUPPORT; + + return ne6x_dev_check_fw(data, size, *region); +} + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state) +{ + u32 value = ne6x_dev_get_features(adpt); + + if (tx_state) + value &= ~NE6X_F_TX_DISABLE; + else + value |= NE6X_F_TX_DISABLE; + + if (rx_state) + value &= ~NE6X_F_RX_DISABLE; + else + value |= NE6X_F_RX_DISABLE; + + ne6x_dev_set_features(adpt, value); + + return 0; +} + +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 number_queue) +{ + u32 mode; + + if (is_fast_mode) { + mode = pf->num_alloc_vfs; + mode |= 1 << 16; + pf->is_fastmode = true; + } else { + mode = 0; + pf->is_fastmode = false; + } + + return ne6x_reg_set_user_data(pf, NP_USER_DATA_FAST_MODE, mode); +} + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + return ne6x_reg_get_dump_data_len(pf, size); +} + +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + return ne6x_reg_get_dump_data(pf, data, size); +} + +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + return ne6x_reg_set_norflash_write_protect(pf, write_protect); +} + +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + return ne6x_reg_get_norflash_write_protect(pf, p_write_protect); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h new file mode 100644 index 00000000000000..02d89659623690 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEV_H +#define _NE6X_DEV_H + +#include "ne6x_portmap.h" + +#define NE6000AI_2S_X16H_100G_N5 0xA050 +#define NE6000AI_2S_X16H_25G_N5 0xA030 +#define NE6000AI_2S_X16H_25G_N6 0xA031 + +#define NE6000_IF_INTERFACE_UP 1 +#define NE6000_IF_INTERFACE_DOWN 0 + +struct ne6x_flowctrl { + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; + +struct ne6x_sfp_mod_type_len { + u32 type; + u32 len; +}; + +enum { + NE6X_SOC_TEMPERATURE = 0x0, + NE6X_SOC_POWER_CONSUM, + NE6X_SOC_DDR_TEST, + NE6X_SOC_FRU, + NE6X_SOC_SERDES_SEND_BIT, + NE6X_SOC_I2C3_TEST, +}; + +struct ne6x_soc_temperature { + u32 chip_temerature; + u32 board_temperature; +}; + +struct ne6x_soc_power { + u32 cur; + u32 vol; + u32 power; +}; + +#define NE6X_FW_SIG_OFFSET 0x0 + +#define NE6X_FW_SIG_LENGTH 8 + +#define NE6X_FW_810_LDR_SIG "NE6K810L" +#define NE6X_FW_810_APP_SIG "NE6K810A" +#define NE6X_FW_807_APP_SIG "NE6K807A" +#define NE6X_FW_803_APP_SIG "NE6K803A" +#define NE6X_FW_803_LDR_SIG "NE6K803L" +#define NE6X_FW_NP_APP_SIG "NE6KNPV1" +#define NE6X_FW_TBL_SIG "NE6KTBL*" +#define NE6X_FW_PXE_SIG "NE6KPXE*" +#define NE6X_FW_FRU_SIG "NE6KFRU*" + +struct ne6x_fw_common_header { + u8 signature[NE6X_FW_SIG_LENGTH]; + u32 version; + u32 length; + u32 sections; + u32 sect_start_addr; + u32 type; + u32 build_date; + u8 reserved[16]; + u8 fw_ver[8]; + u32 package_crc; + u32 header_crc; +}; /* 64B */ + +struct ne6x_fw_np_iwidth { + char sig[4]; + u16 width; + u16 ocp; +}; /* 8B */ + +struct ne6x_fw_np_isad { + char sig[4]; + u32 isa_id; + + struct ne6x_fw_np_iwidth fp; + struct ne6x_fw_np_iwidth dp; + struct ne6x_fw_np_iwidth rp; +}; /* 32B */ + +struct ne6x_fw_np_atd { + char sig[4]; + u32 at_id; + + struct ne6x_fw_np_iwidth te; +}; /* 16B */ + +struct ne6x_fw_np_header { + char signature[NE6X_FW_SIG_LENGTH]; + u32 hdr_version; + u32 hdr_length; + + u32 rsvd; + u32 build_date; + u32 img_version; + u32 img_length; + + u32 npc_cnt; + u32 npc_offset; + u32 isa_cnt; + u32 isa_offset; + + u32 at_cnt; + u32 at_offset; + u32 atd_cnt; + u32 atd_offset; + + struct ne6x_fw_np_isad ISA[1]; + + struct ne6x_fw_np_atd ATD[1]; + + u32 cipher; /* For future use */ + u32 comp; /* For future use */ + u32 pkg_crc; + u32 hdr_crc; +}; /* 128 B */ + +#define CRC32_REVERSED_POLYNOMIAL 0xEDB88320U +#define CRC32_INITIAL_REMAINDER 0xFFFFFFFFU +#define CRC32_FINALIZE_REMAINDER 0xFFFFFFFFU +#define CRC32_TABLE_SIZE 256U + +enum { + NE6X_FW_NOT_SUPPORT = -1, + NE6X_FW_HEADER_CRC_ERR = -2, + NE6X_FW_LENGTH_ERR = -3, + NE6X_FW_PKG_CRC_ERR = -4, +}; + +struct ne6x_key_filter { + struct list_head list; + struct ne6x_key key; + struct { + u8 is_new_key : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +struct ne6x_vlan_filter { + struct list_head list; + struct ne6x_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +enum { + NE6X_METER_SUBSET0 = 0x0, + NE6X_METER_SUBSET1, + NE6X_METER_SUBSET2, + NE6X_METER_SUBSET3, + NE6X_METER_SUBSET4, + NE6X_METER_SUBSET5, + NE6X_METER_SUBSET6, + NE6X_METER_SUBSET7, + NE6X_METER_SUBSET8, + NE6X_METER_SUBSET9, + NE6X_METER_SUBSET10, + NE6X_METER_SUBSET11, + NE6X_METER_SUBSET12, + NE6X_METER_SUBSET13, + NE6X_METER_SUBSET14, + NE6X_METER_SUBSET15, +}; + +#define NE6X_METER0_TABLE 0x00000000U +#define NE6X_METER1_TABLE 0x80000000U 
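+/* Meter selector layout (assumed from the macros above and the usage in + * ne6x_dev_set_vf_bw()): bit 31 selects the METER0/METER1 table, + * NE6X_METER_SUBSET() places the subset index in bits 30:27, and the low + * bits carry the adapter's vport index. + */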
+#define NE6X_METER_SUBSET(n) (((n) & 0xf) << 27) + +struct vf_stat { + u64 rx_drop_pkts; + u64 rx_broadcast_pkts; + u64 rx_multicast_pkts; + u64 rx_unicast_pkts; + u64 tx_broadcast_pkts; + u64 tx_multicast_pkts; + u64 tx_unicast_pkts; + u64 rx_malform_pkts; + u64 tx_malform_pkts; +}; + +enum ne6x_fec_state { + NE6X_FEC_NONE, + NE6X_FEC_RS, + NE6X_FEC_BASER, + NE6X_FEC_AUTO, +}; + +int ne6x_dev_init(struct ne6x_pf *pf); +int ne6x_dev_get_port_num(struct ne6x_pf *pf); +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt); +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status); +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed); + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt); + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify); + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt); +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 value); + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu); +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu); + +void ne6x_dev_clear_vport(struct ne6x_pf *pf); +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt); +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt); +int ne6x_dev_set_vport(struct ne6x_adapter *adpt); + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf); + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *info); + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt); + +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec); +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec); + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_read_qsfp(struct ne6x_adapter *adpt, u8 regaddr, u8 *data, int len); + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags); + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode); + +int 
ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags); + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp); +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power); +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size); +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf); +int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id); + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state); +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state); + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status); + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state); +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat); +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt); +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed); + +int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 index, + u32 *data, int size); + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state); + +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate); + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt); +int ne6x_dev_test_reg(struct ne6x_adapter *adpt); +int ne6x_dev_test_intr(struct ne6x_adapter *adpt); +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data); +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region); + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state); +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 num_queue); +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable); +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable); +int ne6x_dev_get_pport(struct ne6x_adapter *adpt); +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +u32 ne6x_dev_crc32(const u8 *buf, u32 size); +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable); +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c new file mode 100644 index 00000000000000..31b954bdca46d7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c @@ -0,0 +1,1623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include +#include "version.h" + +static const char ne6x_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test ", "Loopback test ", "Register test ", "Interrupt test" +}; + +#define NE6X_TEST_LEN (sizeof(ne6x_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6x_q_stats_len(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6x_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ +#define NE6X_NETDEV_STAT(_net_stat) NE6X_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_stats[] = { + NE6X_NETDEV_STAT(rx_packets), + NE6X_NETDEV_STAT(tx_packets), + NE6X_NETDEV_STAT(rx_bytes), + NE6X_NETDEV_STAT(tx_bytes), + NE6X_NETDEV_STAT(rx_errors), + NE6X_NETDEV_STAT(tx_errors), + NE6X_NETDEV_STAT(rx_dropped), + NE6X_NETDEV_STAT(tx_dropped), + NE6X_NETDEV_STAT(collisions), + NE6X_NETDEV_STAT(rx_length_errors), + NE6X_NETDEV_STAT(rx_crc_errors), +}; + +#define NE6X_DEVICE_ETH_STAT(_dev_eth_stat) NE6X_STAT(struct ne6x_eth_stats, \ + #_dev_eth_stat, _dev_eth_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_dev_eth_stats[] = { + NE6X_DEVICE_ETH_STAT(rx_unicast), + NE6X_DEVICE_ETH_STAT(rx_multicast), + NE6X_DEVICE_ETH_STAT(rx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_discards), + NE6X_DEVICE_ETH_STAT(rx_miss), + NE6X_DEVICE_ETH_STAT(tx_unicast), + NE6X_DEVICE_ETH_STAT(tx_multicast), + NE6X_DEVICE_ETH_STAT(tx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_malform), + NE6X_DEVICE_ETH_STAT(tx_malform), +}; + +#define NE6X_PF_STAT(_name, _stat) NE6X_STAT(struct ne6x_pf, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_pf_stats[] = { + NE6X_PF_STAT("tx_timeout", tx_timeout_count), +}; + +/* per-queue ring statistics */ +#define NE6X_QUEUE_STAT(_name, _stat) NE6X_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_tx_queue_stats[] = { + NE6X_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6X_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6X_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6X_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6X_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6X_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6X_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6X_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6x_stats ne6x_gstrings_rx_queue_stats[] = { + NE6X_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_pg_err", 
rx_stats.alloc_page_failed), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6X_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6X_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6X_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6X_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6X_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6x_stats ne6x_gstrings_cq_queue_stats[] = { + NE6X_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6X_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6X_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6X_PORT_MAC_STAT(_name, _stat) NE6X_STAT(struct ne6x_adapter, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_port_mac_stats[] = { + NE6X_PORT_MAC_STAT("port.rx_eth_byte", stats.mac_rx_eth_byte), + NE6X_PORT_MAC_STAT("port.rx_eth", stats.mac_rx_eth), + NE6X_PORT_MAC_STAT("port.rx_eth_undersize", stats.mac_rx_eth_undersize), + NE6X_PORT_MAC_STAT("port.rx_eth_crc_err", stats.mac_rx_eth_crc), + NE6X_PORT_MAC_STAT("port.rx_eth_64b", stats.mac_rx_eth_64b), + NE6X_PORT_MAC_STAT("port.rx_eth_65_127b", stats.mac_rx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.rx_eth_128_255b", stats.mac_rx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.rx_eth_256_511b", stats.mac_rx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.rx_eth_512_1023b", stats.mac_rx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.rx_eth_1024_15360b", stats.mac_rx_eth_1024_15360b), + NE6X_PORT_MAC_STAT("port.tx_eth_byte", stats.mac_tx_eth_byte), + NE6X_PORT_MAC_STAT("port.tx_eth", stats.mac_tx_eth), + NE6X_PORT_MAC_STAT("port.tx_eth_undersize", stats.mac_tx_eth_undersize), + NE6X_PORT_MAC_STAT("port.tx_eth_64b", stats.mac_tx_eth_64b), + NE6X_PORT_MAC_STAT("port.tx_eth_65_127b", stats.mac_tx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.tx_eth_128_255b", stats.mac_tx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.tx_eth_256_511b", stats.mac_tx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.tx_eth_512_1023b", stats.mac_tx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.tx_eth_1024_15360b", stats.mac_tx_eth_1024_15360b), +}; + +#define NE6X_ADPT_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_stats) +#define NE6X_ADPT_DEV_ETH_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_dev_eth_stats) + +#define NE6X_PF_STATS_LEN ARRAY_SIZE(ne6x_gstrings_pf_stats) +#define NE6X_PORT_MAC_STATS_LEN ARRAY_SIZE(ne6x_gstrings_port_mac_stats) + +#define NE6X_ALL_STATS_LEN(n) \ + (NE6X_ADPT_STATS_LEN + NE6X_ADPT_DEV_ETH_STATS_LEN + \ + NE6X_PF_STATS_LEN + NE6X_PORT_MAC_STATS_LEN + ne6x_q_stats_len(n)) + +struct ne6x_priv_flag { + char name[ETH_GSTRING_LEN]; + u32 bitno; /* bit position in pf->flags */ +}; + +#define NE6X_PRIV_FLAG(_name, _bitno) { \ + .name = _name, \ + .bitno = _bitno, \ +} + +static const struct ne6x_priv_flag ne6x_gstrings_priv_flags[] = { + NE6X_PRIV_FLAG("disable-fw-lldp", NE6X_ADPT_F_DISABLE_FW_LLDP), + NE6X_PRIV_FLAG("link-down-on-close", NE6X_ADPT_F_LINKDOWN_ON_CLOSE), + NE6X_PRIV_FLAG("write-protect", NE6X_ADPT_F_NORFLASH_WRITE_PROTECT), + NE6X_PRIV_FLAG("ddos-switch", NE6X_ADPT_F_DDOS_SWITCH), + NE6X_PRIV_FLAG("white-list", NE6X_ADPT_F_ACL), + NE6X_PRIV_FLAG("trust-vlan", NE6X_ADPT_F_TRUST_VLAN), +}; + +#define NE6X_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ne6x_gstrings_priv_flags) + +static void ne6x_get_settings_link_up_fec(struct net_device *netdev, + u32 link_speed, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + enum ne6x_fec_state 
fec = NE6X_FEC_NONE; + + switch (link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + else if (fec == NE6X_FEC_BASER) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + + break; + default: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + break; + } +} + +static void ne6x_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) +{ + struct ne6x_link_status *link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + link_info = &adpt->port_info->phy.link_info; + switch (link_info->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 100000baseCR4_Full); + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + + ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & NE6X_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Autoneg); + + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + + ne6x_get_settings_link_up_fec(netdev, link_info->link_speed, ks); +} + +static void ne6x_phy_type_to_ethtool(struct ne6x_adapter *adpt, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); +} + +static void ne6x_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ne6x_phy_type_to_ethtool(adpt, ks); + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +static int ne6x_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_link_status *hw_link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + hw_link_info = &adpt->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) + ne6x_get_settings_link_up(ks, netdev); + else + ne6x_get_settings_link_down(ks, netdev); + + if (!ne6x_dev_check_speed(adpt, SPEED_10000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); + + if 
(!ne6x_dev_check_speed(adpt, SPEED_25000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_100000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_40000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + return 0; +} + +static int ne6x_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + bool if_running = netif_running(netdev); + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + u32 master = (adpt->idx == 0); + char *speed = "Unknown "; + u32 link_speed; + u32 sfp_speed; + int ret; + + if (ne6x_dev_check_speed(adpt, ks->base.speed)) { + dev_info(&pf->pdev->dev, "speed not support\n"); + return -EOPNOTSUPP; + } + + if (!master && pf->dev_type == NE6000AI_2S_X16H_25G_N5) { + dev_info(&pf->pdev->dev, "only master port can change speed\n"); + return -EOPNOTSUPP; + } + + switch (ks->base.speed) { + case SPEED_100000: + link_speed = NE6X_LINK_SPEED_100GB; + break; + case SPEED_40000: + link_speed = NE6X_LINK_SPEED_40GB; + break; + case SPEED_25000: + link_speed = NE6X_LINK_SPEED_25GB; + break; + case SPEED_10000: + link_speed = NE6X_LINK_SPEED_10GB; + break; + default: + return -EOPNOTSUPP; + } + + ret = ne6x_dev_get_sfp_speed(adpt, &sfp_speed); + if (!ret) { + switch (sfp_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (sfp_speed != link_speed) + netdev_info(adpt->netdev, "speed not match, sfp support%sbps Full Duplex\n", + speed); + } + + if (if_running) + ne6x_close(adpt->netdev); + + ret = ne6x_dev_set_speed(adpt, link_speed); + if (if_running) + ne6x_open(adpt->netdev); + + return ret; +} + +static void __ne6x_add_stat_strings(u8 **p, const struct ne6x_stats stats[], + const unsigned int size, + ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6x_add_stat_strings(p, stats, ...) 
\ + __ne6x_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6x_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + unsigned int i; + + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_dev_eth_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_pf_stats); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_add_stat_strings(&data, ne6x_gstrings_tx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_rx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_cq_queue_stats, i); + } + + ne6x_add_stat_strings(&data, ne6x_gstrings_port_mac_stats); +} + +static void ne6x_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + u8 *p = data; + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", ne6x_gstrings_priv_flags[i].name); + p += ETH_GSTRING_LEN; + } +} + +static void ne6x_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6x_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6x_gstrings_test, NE6X_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_PRIV_FLAGS: + ne6x_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +static int ne6x_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return NE6X_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6X_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return NE6X_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +static void ne6x_get_mac_stats(struct ne6x_adapter *adpt) +{ + ne6x_dev_get_mac_stats(adpt); +} + +static void ne6x_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + char *p; + + ne6x_update_pf_stats(adpt); + + for (j = 0; j < NE6X_ADPT_STATS_LEN; j++) { + p = (char *)ne6x_get_adpt_stats_struct(adpt) + + ne6x_gstrings_adpt_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_ADPT_DEV_ETH_STATS_LEN; j++) { + p = (char *)(&adpt->eth_stats) + + ne6x_gstrings_adpt_dev_eth_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_dev_eth_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_PF_STATS_LEN; j++) { + p = (char *)pf + ne6x_gstrings_pf_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_pf_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = READ_ONCE(adpt->tx_rings[j]); + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = READ_ONCE(adpt->rx_rings[j]); + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = READ_ONCE(adpt->cq_rings[j]); + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + + rcu_read_unlock(); + + ne6x_get_mac_stats(adpt); + + for (j = 0; j < NE6X_PORT_MAC_STATS_LEN; j++) { + p = (char *)adpt + ne6x_gstrings_port_mac_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_port_mac_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +extern char ne6x_driver_name[]; + +static void ne6x_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + u32 soc_ver = 0, np_ver = 0, erom_ver = 0; + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + char nvm_version_str[32]; + char driver_name[32]; + char temp_str[16] = {0}; + + snprintf(driver_name, 32, "%s", ne6x_driver_name); + strscpy(drvinfo->driver, driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); + memset(nvm_version_str, 0, sizeof(nvm_version_str)); + soc_ver = pf->verinfo.firmware_soc_ver; + np_ver = pf->verinfo.firmware_np_ver & 0xFFFF; + erom_ver = pf->verinfo.firmware_pxe_ver & 0xFFFF; + snprintf(nvm_version_str, 20, "%d.%d.%d.%d ", (soc_ver & 0xff000000) >> 24, + ((erom_ver & 0xFFFF) / 100), ((soc_ver & 0xFFFF) / 100), + ((np_ver & 0xFFFF) / 100)); + if (erom_ver % 100) { + snprintf(temp_str, 4, "P%d", (erom_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if ((soc_ver & 0xffff) % 100) { + snprintf(temp_str, 4, "A%d", ((soc_ver & 0xffff) % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if (np_ver % 100) { + snprintf(temp_str, 4, "N%d", (np_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + strncpy(drvinfo->fw_version, nvm_version_str, sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6x_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_hw *hw = &pf->hw; + unsigned int i, j, ri; + u32 *reg_buf = p; + u32 reg; + + regs->version = 1; + + /* loop through the diags reg table for what to print */ + ri = 0; + for (i = 0; ne6x_reg_list[i].offset != 0; i++) { + for (j = 0; j < ne6x_reg_list[i].elements; j++) { + reg = ne6x_reg_list[i].offset + (j * ne6x_reg_list[i].stride); + reg_buf[ri++] = rd64(hw, reg); + } + } +} + +static void ne6x_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6X_TEST_LEN); +} + +static int ne6x_get_regs_len(struct net_device *netdev) +{ + int reg_count = 0; + int i; + + for (i = 0; ne6x_reg_list[i].offset != 0; i++) + reg_count += ne6x_reg_list[i].elements; + + return reg_count * sizeof(u32); +} + +static void ne6x_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adpt->num_rx_desc; + ring->tx_pending = adpt->num_tx_desc; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_pending = 0; +} + +static int ne6x_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + u32 new_rx_count, new_tx_count, new_cq_count, new_tg_count; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + int i; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < 
NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_tx_count + new_rx_count; + new_tg_count = new_tx_count; + + if (new_tx_count == adpt->num_tx_desc && new_rx_count == adpt->num_rx_desc) + return 0; + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + + usleep_range(1000, 2000); + } + + if (!netif_running(adpt->netdev)) { + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to close adpt = %d\n", adpt->idx); + goto done; + } + + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adpt->tx_rings[0]->count, adpt->rx_rings[0]->count, new_tx_count, new_rx_count); + + /* simple case - set for the next time the netdev is started */ + for (i = 0; i < adpt->num_queue; i++) { + adpt->tx_rings[i]->count = new_tx_count; + adpt->rx_rings[i]->count = new_rx_count; + adpt->cq_rings[i]->count = new_cq_count; + adpt->tg_rings[i]->count = new_tg_count; + } + + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to open adpt = %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static void ne6x_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl flowctrl; + int ret; + + ret = ne6x_dev_get_flowctrl(adpt, &flowctrl); + if (ret) + return; + + pause->autoneg = 0; + pause->rx_pause = flowctrl.rx_pause; + pause->tx_pause = flowctrl.tx_pause; +} + +static int ne6x_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl flowctrl; + int ret; + + if (pause->autoneg) + return -EOPNOTSUPP; + + flowctrl.autoneg = pause->autoneg; + flowctrl.rx_pause = pause->rx_pause; + flowctrl.tx_pause = pause->tx_pause; + + ret = ne6x_dev_set_flowctrl(adpt, &flowctrl); + if (ret) + return ret; + + return 0; +} + +static int ne6x_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6x_get_eeprom_len(struct net_device *netdev) { return 256; } + +static int ne6x_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + u8 *eeprom_buff; + int err = 0; + int 
ret_val; + u32 magic; + + if (eeprom->len == 0) + return -EINVAL; + + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + /* make sure it is the right magic for NVMUpdate */ + if ((eeprom->magic >> 16) != hw->device_id) + err = -EINVAL; + else if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + err = -EBUSY; + + return err; + } + + /* normal ethtool get_eeprom support */ + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = ne6x_dev_read_eeprom(adpt, 0x0, (u8 *)eeprom_buff, eeprom->len); + memcpy(bytes, eeprom_buff, eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) + +static u64 ne6x_get_rss_hash_opts(struct ne6x_adapter *adpt, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6x_set_rss_hash_opts(struct ne6x_adapter *adpt, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adpt->rss_info.hash_type; + int status; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adpt->rss_info.hash_type) + return 0; + + adpt->rss_info.hash_type = rss_flags; + + status = 
ne6x_dev_set_rss(adpt, &adpt->rss_info); + + return (status != 0) ? (-EIO) : 0; +} + +static int ne6x_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: + info->data = ne6x_get_rss_hash_opts(adpt, info->flow_type); + break; + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ne6x_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int status = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + case ETHTOOL_SRXFH: + status = ne6x_set_rss_hash_opts(adpt, info); + break; + default: + return -EINVAL; + } + + return status; +} + +static u32 ne6x_get_rxfh_key_size(struct net_device *netdev) +{ + return NE6X_RSS_MAX_KEY_SIZE; +} + +static u32 ne6x_get_rss_table_size(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + + return rss_info->ind_table_size; +} + +static int ne6x_get_rxfh(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int n = rss_info->ind_table_size; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (p) { + while (n--) + p[n] = rss_info->ind_table[n]; + } + + if (key) + memcpy(key, rss_info->hash_key, ne6x_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ne6x_set_rxfh(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int i; + int status; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + /* Fill out the redirection table */ + if (p) { + /* Allow at least 2 queues w/ SR-IOV. */ + for (i = 0; i < rss_info->ind_table_size; i++) + rss_info->ind_table[i] = p[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(&rss_info->hash_key[0], key, ne6x_get_rxfh_key_size(netdev)); + + status = ne6x_dev_set_rss(adpt, rss_info); + + return (status == 0) ? 
0 : (-EIO); +} + +static void ne6x_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adpt->port_info->hw_max_queue; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adpt->num_queue; +} + +static int ne6x_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + int qp_remaining, q_vectors, i; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > pf->hw.expect_vp) + return -EINVAL; + + if (channels->combined_count == adpt->num_queue) { + /* nothing to do */ + netdev_info(netdev, "channel count unchanged, nothing to do\n"); + return 0; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + netdev_info(netdev, "ne6x config busy, timed out\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* set for the next time the netdev is started */ + if (!netif_running(adpt->netdev)) { + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + netdev_info(netdev, "Interface is down, queue count change takes effect when the interface is brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "failed to close adpt %d\n", adpt->idx); + goto done; + } + + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "failed to open adpt %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static int ne6x_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + ne6x_dev_set_led(adpt, true); + return 1; + case ETHTOOL_ID_ON: + return 0; + case ETHTOOL_ID_OFF: + return 0; + case ETHTOOL_ID_INACTIVE: + ne6x_dev_set_led(adpt, false); + } + + return 0; +} + +static int ne6x_nway_reset(struct net_device *netdev) { return 0; } + +static u64 ne6x_link_test(struct net_device *netdev, u64 *data) +{ + struct ne6x_adapter *adpt = 
ne6x_netdev_to_adpt(netdev); + bool link_up = false; + int verify; + + verify = 0; + link_up = adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP; + usleep_range(10, 20); + + link_up &= verify; + if (link_up) + *data = 1; + else + *data = 0; + + return *data; +} + +static void ne6x_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + /* Online tests */ + if (ne6x_link_test(netdev, &data[NE6X_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[NE6X_ETH_TEST_LOOPBACK] = 0; + if (ne6x_dev_test_loopback(adpt)) { + data[NE6X_ETH_TEST_LOOPBACK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_REG] = 0; + if (ne6x_dev_test_reg(adpt)) { + data[NE6X_ETH_TEST_REG] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_INT] = 0; + if (ne6x_dev_test_intr(adpt)) { + data[NE6X_ETH_TEST_INT] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static int ne6x_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct ne6x_link_status *hw_link_info; + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err = 0; + + hw_link_info = &adpt->port_info->phy.link_info; + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) { + switch (hw_link_info->link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + err = ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) { + fecparam->fec |= ETHTOOL_FEC_RS; + fecparam->active_fec = ETHTOOL_FEC_RS; + } else if (fec == NE6X_FEC_BASER) { + fecparam->fec |= ETHTOOL_FEC_BASER; + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + break; + default: + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + + return err; +} + +static int ne6x_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int err = 0; + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: AUTO"); + err = -EINVAL; + goto done; + case ETHTOOL_FEC_RS: + fec = NE6X_FEC_RS; + break; + case ETHTOOL_FEC_BASER: + fec = NE6X_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + fec = NE6X_FEC_NONE; + break; + default: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", fecparam->fec); + err = -EINVAL; + goto done; + } + + err = ne6x_dev_set_fec(adpt, fec); + if (err) + return err; + +done: + return err; +} + +static const char * const flash_region_strings[] = { + "810 loader", + "810 app", + "807 app", + "NP Image", + "PXE Image", +}; + +static int ethtool_flash_firmware(struct net_device *netdev, u32 type, const u8 *data, + u32 size) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int ret; + + ret = ne6x_dev_upgrade_firmware(adpt, type, (u8 *)data, size, 1); + if (ret) + dev_err(&pf->pdev->dev, "Failed to flash firmware\n"); + + return ret; +} + +static int ethtool_flash_region(struct net_device *netdev, const u8 *data, u32 size, u32 region) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret; + + netdev_info(netdev, "%s = 0x%x\n", __func__, region); + + switch 
(region) { + case NE6X_ETHTOOL_FLASH_810_APP: + case NE6X_ETHTOOL_FLASH_NP: + case NE6X_ETHTOOL_FLASH_PXE: + case NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FRU: + case NE6X_ETHTOOL_FLASH_807_APP: + ret = ethtool_flash_firmware(netdev, region, data, size); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + if (ret) + dev_info(&pf->pdev->dev, "loading %s failed, reload the driver\n", + flash_region_strings[region]); + + return ret; +} + +static int ne6x_ethtool_get_flash_region(struct net_device *netdev, const u8 *data, u32 *size) +{ + int region = -1; + int ret; + + ret = ne6x_dev_validate_fw(data, *size, &region); + if (ret) { + netdev_err(netdev, "firmware validation failed, ret = %d\n", ret); + return -1; + } + + return region; +} + +static int ne6x_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + const struct firmware *fw; + unsigned int master; + size_t fw_size; + u8 *fw_data; + int region; + int ret; + + master = (adpt->idx == 0); + if (!master) { + dev_info(&pf->pdev->dev, "only the master port can upgrade firmware\n"); + return -1; + } + + ret = request_firmware(&fw, ef->data, &pf->pdev->dev); + if (ret < 0) + return ret; + + fw_data = (u8 *)fw->data; + fw_size = fw->size; + if (fw_size > 0) { + region = ne6x_ethtool_get_flash_region(netdev, fw_data, (u32 *)&fw_size); + if (region < 0) { + ret = region; + goto out_free_fw; + } + + ret = ethtool_flash_region(netdev, fw_data, fw_size, region); + if (ret) + goto out_free_fw; + } + +out_free_fw: + release_firmware(fw); + return ret; +} + +#define NE6X_FIRMWARE_RESET_CHIP \ + ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) << ETH_RESET_SHARED_SHIFT) + +static int ne6x_reset(struct net_device *netdev, u32 *flags) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + bool reload = false; + u32 req = *flags; + + if (!req) + return -EINVAL; + + if (adpt->idx != 0x0) { + netdev_err(netdev, "Reset is not supported from eth0_nfp1\n"); + return -EOPNOTSUPP; + } + + if ((req & NE6X_FIRMWARE_RESET_CHIP) == NE6X_FIRMWARE_RESET_CHIP) { + /* This feature is not supported in older firmware versions */ + if (!ne6x_dev_reset_firmware(adpt)) { + netdev_info(netdev, "Firmware reset request successful.\n"); + reload = true; + *flags &= ~NE6X_FIRMWARE_RESET_CHIP; + } + } + + if (reload) + netdev_info(netdev, "Reload driver to complete reset\n"); + + return 0; +} + +static int ne6x_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_sfp_mod_type_len sfp_mod; + int err; + + err = ne6x_dev_get_sfp_type_len(adpt, &sfp_mod); + if (err) + return err; + + modinfo->type = sfp_mod.type; + modinfo->eeprom_len = sfp_mod.len; + netdev_info(netdev, "type %d eeprom_len %d\n", sfp_mod.type, sfp_mod.len); + + return 0; +} + +#define STD_SFP_INFO_MAX_SIZE 640 + +static int ne6x_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + err = ne6x_dev_get_sfp_eeprom(adpt, sfp_data, ee->offset, ee->len, 0); + if (err) + return err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + +static u32 
ne6x_get_priv_flags(struct net_device *netdev) +{ + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 is_write_protect = false; + u32 i, ret_flags = 0; + u32 value = 0; + + ne6x_dev_get_norflash_write_protect(adpt->back, &is_write_protect); + if (is_write_protect) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + if (ne6x_dev_get_trust_vlan(adpt->back)) + set_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + else + clear_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + if (test_bit(priv_flag->bitno, adpt->flags)) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +static int ne6x_set_priv_flags(struct net_device *netdev, u32 flags) +{ + DECLARE_BITMAP(change_flags, NE6X_ADPT_F_NBITS); + DECLARE_BITMAP(orig_flags, NE6X_ADPT_F_NBITS); + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int ret = 0; + u32 i; + + if (flags >= BIT(NE6X_PRIV_FLAG_ARRAY_SIZE)) + return -EINVAL; + + bitmap_copy(orig_flags, adpt->flags, NE6X_ADPT_F_NBITS); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + set_bit(priv_flag->bitno, adpt->flags); + else + clear_bit(priv_flag->bitno, adpt->flags); + } + + bitmap_xor(change_flags, adpt->flags, orig_flags, NE6X_ADPT_F_NBITS); + + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, change_flags)) { + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags)) + ne6x_dev_set_fw_lldp(adpt, false); + else + ne6x_dev_set_fw_lldp(adpt, true); + } + + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, change_flags)) { + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags)) + ne6x_dev_set_norflash_write_protect(adpt->back, true); + else + ne6x_dev_set_norflash_write_protect(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, change_flags)) { + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags)) + ne6x_dev_set_ddos(adpt->back, true); + else + ne6x_dev_set_ddos(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_ACL, change_flags)) { + if (adpt->idx != 0) { + netdev_err(netdev, "only adpt 0 supports the acl flag\n"); + return -EINVAL; + } + if (test_bit(NE6X_ADPT_F_ACL, adpt->flags)) { + if (ne6x_dev_set_white_list(adpt->back, true)) + return -EPERM; + } else { + ne6x_dev_set_white_list(adpt->back, false); + } + } + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, change_flags)) { + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags)) + ne6x_dev_set_trust_vlan(adpt->back, true); + else + ne6x_dev_set_trust_vlan(adpt->back, false); + } + return ret; +} + +static int ne6x_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + + dump->version = 1; + dump->flag = 0; + + /* Calculate the requested preset idx length */ + if (ne6x_dev_get_dump_data_len(pf, &dump->len)) { + dump->len = 0; + return -EAGAIN; + } + + return 0; +} + +static int ne6x_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, void *buffer) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + u32 *p = buffer; + + if (ne6x_dev_get_dump_data(pf, p, dump->len)) + return -EAGAIN; + + return 0; +} + +static const struct 
ethtool_ops ne6x_ethtool_ops = { + .get_link_ksettings = ne6x_get_link_ksettings, + .set_link_ksettings = ne6x_set_link_ksettings, + .get_strings = ne6x_get_strings, + .get_sset_count = ne6x_get_sset_count, + .get_ethtool_stats = ne6x_get_ethtool_stats, + .get_drvinfo = ne6x_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6x_get_regs, + .get_regs_len = ne6x_get_regs_len, + .get_dump_flag = ne6x_get_dump_flag, + .get_dump_data = ne6x_get_dump_data, + .get_ringparam = ne6x_get_ringparam, + .set_ringparam = ne6x_set_ringparam, + .get_pauseparam = ne6x_get_pauseparam, + .set_pauseparam = ne6x_set_pauseparam, + .get_coalesce = ne6x_get_coalesce, + .get_eeprom_len = ne6x_get_eeprom_len, + .get_eeprom = ne6x_get_eeprom, + .get_rxnfc = ne6x_get_rxnfc, + .set_rxnfc = ne6x_set_rxnfc, + .get_rxfh_key_size = ne6x_get_rxfh_key_size, + .get_rxfh_indir_size = ne6x_get_rss_table_size, + .get_rxfh = ne6x_get_rxfh, + .set_rxfh = ne6x_set_rxfh, + .get_channels = ne6x_get_channels, + .set_channels = ne6x_set_channels, + .flash_device = ne6x_set_flash, + .reset = ne6x_reset, + .get_module_info = ne6x_get_module_info, + .get_module_eeprom = ne6x_get_module_eeprom, + .get_priv_flags = ne6x_get_priv_flags, + .set_priv_flags = ne6x_set_priv_flags, + .set_phys_id = ne6x_set_phys_id, + .nway_reset = ne6x_nway_reset, + .self_test = ne6x_diag_test, + .get_fecparam = ne6x_get_fec_param, + .set_fecparam = ne6x_set_fec_param, +}; + +void ne6x_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6x_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h new file mode 100644 index 00000000000000..54d84d65900f20 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ETHTOOL_H +#define _NE6X_ETHTOOL_H + +#define NE6X_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6x_ethtool_test_id { + NE6X_ETH_TEST_LINK, + NE6X_ETH_TEST_LOOPBACK, + NE6X_ETH_TEST_REG, + NE6X_ETH_TEST_INT, + NE6X_ETH_TEST_CHIP_TEMPERATUR, + NE6X_ETH_TEST_BOARD_TEMPERATUR, + NE6X_ETH_TEST_CURRENT, + NE6X_ETH_TEST_VOLTAGE, + NE6X_ETH_TEST_POWER, + NE6X_ETH_TEST_I2C3, +}; + +void ne6x_set_ethtool_ops(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c new file mode 100644 index 00000000000000..99b228a3c8d891 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_interrupt.h" + +static int ne6x_init_msix(struct ne6x_pf *pf, int budget) +{ + int actual_vector; + ssize_t size; + + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, budget); + dev_info(&pf->pdev->dev, "%s actual_vector = %d\n", __func__, actual_vector); + if (actual_vector <= 0) { + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + dev_err(&pf->pdev->dev, "error msix enable failed\n"); + return -ENODEV; + } + + size = struct_size(pf->irq_pile, list, actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + return 0; +} + +static int ne6x_init_intx(struct ne6x_pf *pf) +{ + int actual_vector; + ssize_t size; + + dev_info(&pf->pdev->dev, "try enable intx\n"); + actual_vector = 0x1; + + size = struct_size(pf->irq_pile, list, actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + test_and_set_bit(NE6X_PF_INTX, pf->state); + + return 0; +} + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf) +{ + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + u64 __iomem *reg; + int err; + int i; + + pf->msix_entries = kcalloc(NE6X_MAX_MSIX_NUM, sizeof(struct msix_entry), GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < NE6X_MAX_MSIX_NUM; i++) + pf->msix_entries[i].entry = i; + + test_and_set_bit(NE6X_PF_MSIX, pf->state); + + if (ne6x_init_msix(pf, NE6X_MAX_MSIX_NUM)) { + clear_bit(NE6X_PF_MSIX, pf->state); + err = ne6x_init_intx(pf); + if (err) { + dev_err(&pf->pdev->dev, "error intx enable failed\n"); + return err; + } + } + + if (pf->irq_pile->num_entries >= NE6X_MAX_MSIX_NUM) { + err = ne6x_init_link_irq(pf); + if (err) { + dev_err(&pf->pdev->dev, "init int irq failed\n"); + return err; + } + } + + /* We only initialize int once, so as not to overwrite user settings */ + if (test_and_set_bit(NE6X_INT_INIT_DOWN, pf->state)) + return 0; + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + return 0; +} + 
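+ /* Queue-vector plumbing: each ne6x_q_vector pairs one NAPI context with the + * Tx/Rx/CQ rings that end up sharing its MSI-X vector. ne6x_adpt_alloc_q_vectors() + * below allocates adpt->num_q_vectors of them and unwinds in reverse order on + * failure; ne6x_adpt_request_irq() attaches the actual interrupt handlers later. + */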
+static int ne6x_adpt_alloc_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->adpt = adpt; + q_vector->v_idx = v_idx; + + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + + if (adpt->netdev) + netif_napi_add(adpt->netdev, &q_vector->napi, ne6x_napi_poll); + + /* tie q_vector and adpt together */ + adpt->q_vectors[v_idx] = q_vector; + return 0; +} + +static void ne6x_free_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *ring; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + if (!q_vector) { + dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); + return; + } + + /* disassociate q_vector from rings */ + ne6x_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->cq) ring->q_vector = NULL; + + /* only adapter w/ an associated netdev is set up w/ NAPI */ + if (adpt->netdev) + netif_napi_del(&q_vector->napi); + + adpt->q_vectors[v_idx] = NULL; + kfree(q_vector); +} + +static int ne6x_adpt_alloc_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx, num_q_vectors, err; + + /* if not MSIX, give the one vector only to the LAN adapter */ + num_q_vectors = adpt->num_q_vectors; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ne6x_adpt_alloc_q_vector(adpt, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ne6x_free_q_vector(adpt, v_idx); + + return err; +} + +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx; + + for (v_idx = 0; v_idx < adpt->num_q_vectors; v_idx++) + ne6x_free_q_vector(adpt, v_idx); +} + +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int ret = -ENOENT; + + if (adpt->q_vectors[0]) { + dev_info(&pf->pdev->dev, "adapter %d has existing q_vectors\n", adpt->idx); + return -EEXIST; + } + + if (adpt->base_vector) { + dev_info(&pf->pdev->dev, "adapter %d has non-zero base vector %d\n", adpt->idx, + adpt->base_vector); + return -EEXIST; + } + + ret = ne6x_adpt_alloc_q_vectors(adpt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for adapter %d, ret=%d\n", + adpt->num_q_vectors, adpt->idx, ret); + adpt->num_q_vectors = 0; + goto vector_setup_out; + } + + if (adpt->num_q_vectors) + adpt->base_vector = adpt->port_info->hw_queue_base; + + if (adpt->base_vector < 0) { + dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for adapter %d, err=%d\n", + adpt->num_q_vectors, adpt->idx, adpt->base_vector); + ne6x_adpt_free_q_vectors(adpt); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +static void ne6x_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector = + container_of(notify, struct ne6x_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void ne6x_irq_affinity_release(struct kref *ref) {} + +static int ne6x_adpt_request_irq_msix(struct ne6x_adapter *adpt, char *basename) +{ + int q_vectors = adpt->num_q_vectors; + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + int irq_num; + int cpu; + + for (vector = 0; vector < q_vectors; vector++) { + struct 
ne6x_q_vector *q_vector = adpt->q_vectors[vector]; + + irq_num = pf->msix_entries[base + vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(irq_num, adpt->irq_handler, 0, q_vector->name, q_vector); + if (err) { + dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = ne6x_irq_affinity_notify; + q_vector->affinity_notify.release = ne6x_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. + */ + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + adpt->irqs_ready = true; + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_num = pf->msix_entries[base + vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, adpt->q_vectors[vector]); + } + + return err; +} + +static irqreturn_t ne6x_intr(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + u64 reg_val; + + reg_val = rd64(hw, NE6X_VPINT_DYN_CTLN(0, NE6X_VP_INT)); + if (!(reg_val & 0x10000)) + return IRQ_NONE; + + napi_schedule(&q_vector->napi); + return IRQ_HANDLED; +} + +static int ne6x_adpt_request_irq_intx(struct ne6x_adapter *adpt, char *basename) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[0]; + struct net_device *netdev = adpt->netdev; + struct ne6x_pf *pf = adpt->back; + u32 irq = pf->pdev->irq; + int err; + + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-INTx", basename, "TxRx"); + + err = request_irq(irq, &ne6x_intr, IRQF_SHARED, netdev->name, q_vector); + if (err) { + dev_info(&pf->pdev->dev, "INTx request_irq failed, error: %d\n", err); + return err; + } + + return 0; +} + +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename) +{ + struct ne6x_pf *pf = adpt->back; + int err; + + if (test_bit(NE6X_PF_MSIX, pf->state)) + err = ne6x_adpt_request_irq_msix(adpt, basename); + else + err = ne6x_adpt_request_irq_intx(adpt, basename); + + if (err) + dev_info(&pf->pdev->dev, "request_irq failed, error %d\n", err); + + return err; +} + +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + union ne6x_int_cfg int_cfg; + u32 qp, nextqp; + int i, q; + + /* The interrupt indexing is offset by 1 in the PFINT_ITRn + * and PFINT_LNKLSTn registers, e.g.: + * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) + */ + qp = adpt->base_queue; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (qp < NE6X_PF_VP0_NUM) { + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = 
adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp + i + q; + + int_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG), int_cfg.val); + + int_mask.val = rd64(hw, + NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } else { + /* SRIOV mode PF Config */ + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp - NE6X_PF_VP0_NUM + i + q; + + int_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG), + int_cfg.val); + + int_mask.val = + rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } +} + +static inline void ne6x_irq_dynamic_enable(struct ne6x_adapter *adpt, int vector) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + + if (vector < NE6X_PF_VP0_NUM) { + int_mask.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK), int_mask.val); + } else { + int_mask.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + int_mask.val); + } +} + +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt) +{ + int i; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_irq_dynamic_enable(adpt, adpt->base_vector + i); + + return 0; +} + +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + int i; + + /* disable each interrupt */ + if (base < NE6X_PF_VP0_NUM) { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT), NE6X_MAX_U64); + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT_MASK), NE6X_MAX_U64); + } + } else { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT), + NE6X_MAX_U64); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + NE6X_MAX_U64); + } + } + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + for (i = 0; i < adpt->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + synchronize_irq(pf->pdev->irq); + } +} + +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int i; + + if (!adpt->q_vectors) + return; + + if (!adpt->irqs_ready) + return; + + adpt->irqs_ready = false; + for (i = 0; i < adpt->num_q_vectors; i++) { + int irq_num; + u16 vector; + + vector = i + base; + irq_num = 
pf->msix_entries[vector].vector; + + /* free only the irqs that were actually requested */ + if (!adpt->q_vectors[i] || !adpt->q_vectors[i]->num_ringpairs) + continue; + + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); + + /* remove our suggested affinity mask for this IRQ */ + irq_set_affinity_hint(irq_num, NULL); + + synchronize_irq(irq_num); + free_irq(irq_num, adpt->q_vectors[i]); + } +} + +static void ne6x_reset_interrupt_capability(struct ne6x_pf *pf) +{ + /* If we're in Legacy mode, the interrupt was cleaned in adpt_close */ + if (pf->msix_entries) { + pci_disable_msix(pf->pdev); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + } + + kfree(pf->irq_pile); + pf->irq_pile = NULL; +} + +int ne6x_init_link_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->link_intname, sizeof(pf->link_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "link", pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_NIC_INT_VP].vector; + err = request_irq(irq_num, ne6x_linkint_irq_handler, 0, pf->link_intname, pf); + if (!err) + pf->link_int_irq_ready = true; + + return 0; +} + +int ne6x_enable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 temp = 1; + int i = 0; + + if (!pf->link_int_irq_ready) + return 0; + + for (i = 0; i < pf->hw.pf_port; i++) + int_mask &= ~(temp << (i + NE6X_NIC_INT_START_BIT)); + + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + + return 0; +} + +int ne6x_disable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 int_val; + + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + int_val = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT)); + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT), + int_val); + + return 0; +} + +void ne6x_free_link_irq(struct ne6x_pf *pf) +{ + if (pf->link_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector); + free_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector, pf); + } + + pf->link_int_irq_ready = false; +} + +static irqreturn_t ne6x_msix_clean_vf_mbx(int irq, void *data) +{ + struct ne6x_pf *pf = data; + struct ne6x_hw *hw = &pf->hw; + bool have_cmd = false; + struct ne6x_vf *vf; + u64 int_val = 0; + u64 val; + int i; + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + test_and_set_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = true; + have_cmd = true; + int_val |= (1ULL << vf->base_queue); + } + } + + if (have_cmd) { + ne6x_service_event_schedule(pf); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), int_val); + } + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), + (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + pf->hw.ne6x_mbx_ready_to_send[i] = true; + } + } + + return IRQ_HANDLED; +} + +int ne6x_init_mailbox_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->mailbox_intname, sizeof(pf->mailbox_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "mailbox", 
pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector; + err = request_irq(irq_num, ne6x_msix_clean_vf_mbx, 0, pf->mailbox_intname, pf); + if (!err) + pf->mailbox_int_irq_ready = true; + + dev_info(&pf->pdev->dev, "reg mailbox irq id= %d,name = %s\n", irq_num, + pf->mailbox_intname); + + return err; +} + +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_mailbox_irq(struct ne6x_pf *pf) +{ + if (pf->mailbox_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector); + free_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector, pf); + } + + pf->mailbox_int_irq_ready = false; +} + +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) + ne6x_adpt_free_q_vectors(pf->adpt[i]); + } + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + ne6x_reset_interrupt_capability(pf); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h new file mode 100644 index 00000000000000..e8d512d965a14f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_INTERRUPT_H +#define _NE6X_INTERRUPT_H + +#include "ne6x.h" + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf); +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt); +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename); +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt); +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt); +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt); +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf); +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt); +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data); +int ne6x_enable_link_irq(struct ne6x_pf *pf); +int ne6x_disable_link_irq(struct ne6x_pf *pf); +int ne6x_init_link_irq(struct ne6x_pf *pf); +void ne6x_free_link_irq(struct ne6x_pf *pf); +int ne6x_init_mailbox_irq(struct ne6x_pf *pf); +void ne6x_free_mailbox_irq(struct ne6x_pf *pf); +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c new file mode 100644 index 00000000000000..80928fcf63a886 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c @@ -0,0 +1,3105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_debugfs.h" +#include "ne6x_arfs.h" +#include "version.h" +#include "ne6x_netlink.h" +#include "ne6x_interrupt.h" + +#define SUMMARY "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver" +#define COPYRIGHT "Copyright(c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." 
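+ /* Module-scope identifiers for ne6x_main.c: ne6x_driver_name ("ncepf") names + * the driver, ne6x_pci_tbl below lists the supported BZWX device IDs + * (0x5010, 0x5011, 0x6010 and 0x6011), and ne6x_wq is the workqueue that the + * service task and link-scan work are queued on. + */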
+ +char ne6x_driver_name[] = "ncepf"; + +static const char ne6x_driver_string[] = SUMMARY; + +const char ne6x_driver_version_str[] = VERSION; +static const char ne6x_copyright[] = COPYRIGHT; + +/* ne6x_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ne6x_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x5010), 0}, + {PCI_VDEVICE(BZWX, 0x5011), 0}, + {PCI_VDEVICE(BZWX, 0x6010), 0}, + {PCI_VDEVICE(BZWX, 0x6011), 0}, + /* required last entry */ + {0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, ne6x_pci_tbl); +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION("Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static struct workqueue_struct *ne6x_wq; +static const struct net_device_ops ne6x_netdev_ops; + +bool netif_is_ne6x(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ne6x_netdev_ops); +} + +static int ne6x_hw_init(struct ne6x_hw *hw) +{ + int cpu_num = num_online_cpus(); + + /* max phy_port */ + hw->pf_port = ne6x_dev_get_port_num(hw->back); + /* expect vp queue */ + hw->expect_vp = NE6X_MAX_VP_NUM / hw->pf_port; + /* actal max vp queue */ + hw->max_queue = min_t(int, cpu_num, hw->expect_vp); + + hw->port_info = devm_kzalloc(ne6x_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) + return -EIO; + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + if (!is_valid_ether_addr(hw->port_info->mac.perm_addr)) + eth_random_addr(hw->port_info->mac.perm_addr); + + return 0; +} + +static int ne6x_aq_get_phy_capabilities(struct ne6x_adapter *adpt, bool is_up, bool get_hw_stats) +{ + struct ne6x_port_info *port_info = adpt->port_info; + + /* read link states */ + if (get_hw_stats) + ne6x_dev_get_link_status(adpt, &port_info->link_status); + + if (is_up) { + if (port_info->link_status.link) { + port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (port_info->link_status.speed) { + case NE6X_LINK_SPEED_10GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +static int ne6x_aq_get_vf_link_status(struct ne6x_adapter *adpt, bool is_up) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_adapter *pf_adpt 
= pf->adpt[(adpt->port_info->lport >= pf->hw.pf_port) ? + (pf->hw.pf_port - 1) : adpt->port_info->lport]; + struct ne6x_link_info *pf_link_status = &pf_adpt->port_info->link_status; + struct ne6x_port_info *vf_port_info = adpt->port_info; + + if (is_up) { + if (pf_link_status->link) { + vf_port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (pf_link_status->speed) { + case NE6X_LINK_SPEED_10GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + vf_port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + vf_port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + vf_port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +static void ne6x_adpt_link_event(struct ne6x_adapter *adpt, bool link_up) +{ + if (!adpt) + return; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state) || !adpt->netdev) + return; + + if (link_up == netif_carrier_ok(adpt->netdev)) + return; + + if (link_up) { + netif_carrier_on(adpt->netdev); + netif_tx_wake_all_queues(adpt->netdev); + } else { + netif_carrier_off(adpt->netdev); + netif_tx_stop_all_queues(adpt->netdev); + } +} + +static void ne6x_print_link_message(struct ne6x_adapter *adpt, bool isup) +{ + char *speed = "Unknown "; + char *an = "False"; + u16 new_speed; + + if (isup) + new_speed = adpt->port_info->phy.link_info.link_speed; + else + new_speed = NE6X_LINK_SPEED_UNKNOWN; + + if (adpt->current_isup == isup && adpt->current_speed == new_speed) + return; + + adpt->current_isup = isup; + adpt->current_speed = new_speed; + + if (!isup) { + netdev_info(adpt->netdev, "NIC Link is Down\n"); + return; + } + + switch (adpt->port_info->phy.link_info.link_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (adpt->port_info->phy.link_info.an_info) + an = "True"; + + netdev_info(adpt->netdev, "NIC Link is Up, %sbps Full Duplex, Autoneg: %s\n", speed, an); +} + +static void ne6x_link_event(struct ne6x_pf *pf) +{ + struct ne6x_phy_info *phy_info; + struct ne6x_adapter *adpt = NULL; + u32 old_link_speed; + bool old_link; + bool link_up; + int i; +#ifdef CONFIG_PCI_IOV + struct ne6x_vf *vf; + int vf_id; +#endif + + for (i = 0; i < pf->num_alloc_adpt; i++) { + link_up = false; + adpt = pf->adpt[i]; + phy_info = &adpt->port_info->phy; + 
phy_info->link_info_old = phy_info->link_info; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_aq_get_phy_capabilities(adpt, false, true); + else + ne6x_aq_get_phy_capabilities(adpt, true, true); + + /* add sfp online state begin */ + ne6x_dev_get_sfp_status(adpt, &phy_info->link_info.ext_info); + if (phy_info->link_info.ext_info != phy_info->link_info_old.ext_info) { + if (phy_info->link_info.ext_info == 0) + netdev_info(adpt->netdev, "adpt->id= %d,optical module unplugged", + adpt->idx); + else + netdev_info(adpt->netdev, "adpt->id= %d,optical module plugged", + adpt->idx); + } + + /* end sfp online state */ + old_link = !!(adpt->port_info->phy.link_info_old.link_info & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + /* if the old link up/down is the same as the new */ + if (link_up == old_link) { + if (link_up && old_link_speed != adpt->port_info->phy.link_info.link_speed) + ne6x_print_link_message(adpt, link_up); + + continue; + } + + ne6x_adpt_link_event(adpt, link_up); + ne6x_print_link_message(adpt, link_up); + } + +#ifdef CONFIG_PCI_IOV + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + if (test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + if (!vf->rx_tx_state) { + adpt->port_info->phy.link_info.link_info = 0x0; + vf->rx_tx_state = true; + } + link_up = false; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + ne6x_aq_get_vf_link_status(adpt, true); + old_link = !!(adpt->port_info->phy.link_info_old.link_info + & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + if (link_up == old_link && + old_link_speed == adpt->port_info->phy.link_info.link_speed) + continue; + + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + ne6x_vc_notify_link_state(vf); + } + } +#endif +} + +static void ne6x_clean_link_status_subtask(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_LINK_POOLING, pf->state)) + return; + + ne6x_link_event(pf); +} + +void ne6x_service_event_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->serv_task); +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt); + +static void ne6x_do_reset(struct ne6x_pf *pf, u32 reset_flags, bool lock_acquired) +{ + struct ne6x_adapter *adpt = NULL; + int i; + + WARN_ON(in_interrupt()); + + if (reset_flags & BIT_ULL(NE6X_PF_RESET_REQUESTED)) { + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_RECOVER, adpt->comm.state)) { + ne6x_adpt_reinit_locked(adpt); + clear_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + } + } + } else if (reset_flags & BIT_ULL(NE6X_CORE_RESET_REQUESTED)) { + /* hardware reset:include PCIE,CORE.etc. 
*/ + dev_info(&pf->pdev->dev, "timeout info: CORE reset\n"); + } else { + dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); + } +} + +static void ne6x_recover_hang_subtask(struct ne6x_pf *pf) +{ + u32 reset_flags = 0; + + if (test_and_clear_bit(NE6X_PF_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_PF_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_CORE_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_CORE_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_GLOBAL_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_GLOBAL_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_DOWN_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_DOWN_REQUESTED); + + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) { + clear_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + test_and_clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && !test_bit(NE6X_DOWN, pf->state) && + !test_bit(NE6X_CONFIG_BUSY, pf->state)) + ne6x_do_reset(pf, reset_flags, false); +} + +static void ne6x_service_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, serv_tmr); + + if (pf->num_alloc_vfs) + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->service_timer_period)); + + ne6x_service_event_schedule(pf); +} + +void ne6x_linkscan_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_linkscan_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, linkscan_tmr); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ)); + else + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ * 30)); + + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_service_task(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, serv_task); + unsigned long start_time = jiffies; + +#ifdef CONFIG_PCI_IOV + /* vf command process */ + ne6x_vc_process_vf_msg(pf); +#endif + + ne6x_recover_hang_subtask(pf); + + ne6x_sync_arfs_fltrs(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + ne6x_service_event_schedule(pf); +} + +static void ne6x_linkscan_work(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, linkscan_work); + + ne6x_clean_link_status_subtask(pf); +} + +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data) +{ + struct ne6x_pf *pf = data; + u64 intval = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + + wr64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT), + intval); + ne6x_linkscan_schedule(pf); + + return IRQ_HANDLED; +} + +static int ne6x_pf_init(struct ne6x_pf *pf) +{ + pf->ctrl_adpt_idx = 0; + mutex_init(&pf->switch_mutex); + + /* set up periodic task facility */ + timer_setup(&pf->serv_tmr, ne6x_service_timer, 0); + pf->service_timer_period = HZ; + timer_setup(&pf->linkscan_tmr, ne6x_linkscan_timer, 0); + add_timer(&pf->serv_tmr); + + INIT_WORK(&pf->serv_task, ne6x_service_task); + INIT_WORK(&pf->linkscan_work, ne6x_linkscan_work); + + clear_bit(NE6X_SERVICE_SCHED, pf->state); + + pf->next_adpt = 0; + pf->num_alloc_adpt = pf->hw.pf_port; + pf->num_alloc_vfs = 0; + pf->mailbox_int_irq_ready = false; + pf->link_int_irq_ready = false; + + ne6x_dbg_pf_init(pf); + ne6x_proc_pf_init(pf); + + /* init key list head node */ + spin_lock_init(&pf->key_list_lock); + INIT_LIST_HEAD(&pf->key_filter_list); + + return 0; +} + +static void ne6x_set_num_rings_in_adpt(struct ne6x_adapter *adpt) +{ + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + adpt->num_tx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_rx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_cq_desc = adpt->num_tx_desc + adpt->num_rx_desc; + adpt->num_tg_desc = adpt->num_tx_desc; + adpt->irqs_ready = false; +} + +static irqreturn_t ne6x_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring && !q_vector->tg.ring) + return IRQ_HANDLED; + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), + 0xffffffffffffffff); + else + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + 0xffffffffffffffff); + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt) +{ + struct ne6x_ring **next_rings; + int ret = -ENODEV; + int size; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + adpt->netdev_registered = false; + size = sizeof(struct ne6x_ring *) * adpt->num_queue * 4; + adpt->tx_rings = kzalloc(size, GFP_KERNEL); + if (!adpt->tx_rings) + goto err_rings; + + next_rings = adpt->tx_rings + adpt->num_queue; + adpt->cq_rings = next_rings; + next_rings += adpt->num_queue; + adpt->rx_rings = next_rings; + adpt->tg_rings = adpt->rx_rings + adpt->num_queue; + + /* allocate memory for q_vector pointers */ + size = sizeof(struct ne6x_q_vector *) * adpt->num_q_vectors; + adpt->q_vectors = kzalloc(size, GFP_KERNEL); + if 
(!adpt->q_vectors) { + kfree(adpt->tx_rings); + ret = -ENOMEM; + goto err_rings; + } + + /* Setup default MSIX irq handler for adapter */ + ne6x_adpt_setup_irqhandler(adpt, ne6x_msix_clean_rings); + ret = 0; + +err_rings: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +static int ne6x_force_link_state(struct ne6x_adapter *adpt, bool is_up) +{ + int err; + + err = ne6x_aq_get_phy_capabilities(adpt, is_up, true); + if (err) + return err; + + if (is_up) + test_and_set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return 0; +} + +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int i, pf_q; + + pf_q = adpt->base_queue; + for (i = 0; i < adpt->num_queue; i++, pf_q++) { + if (pf_q < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_VP_RELOAD), enable); + else + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_VP_RELOAD), + enable); + + usleep_range(1000, 2000); + if (!enable) { + ne6x_tail_update(adpt->rx_rings[i], 0); + ne6x_tail_update(adpt->tx_rings[i], 0); + } + } + + return 0; +} + +int ne6x_adpt_configure(struct ne6x_adapter *adpt) +{ + int err; + int i; + + err = ne6x_adpt_restart_vp(adpt, true); + if (!err) + err = ne6x_adpt_configure_tx(adpt); + + if (!err) + err = ne6x_adpt_configure_cq(adpt); + + if (!err) + err = ne6x_adpt_configure_rx(adpt); + + if (!err) + err = ne6x_adpt_restart_vp(adpt, false); + + if (!err) { + for (i = 0; i < adpt->num_queue && !err; i++) + ne6x_alloc_rx_buffers(adpt->rx_rings[i], + NE6X_DESC_UNUSED(adpt->rx_rings[i])); + } + + return err; +} + +static void ne6x_napi_enable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_enable(&q_vector->napi); + } +} + +static int ne6x_up_complete(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_adpt_configure_msix(adpt); + + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); + ne6x_napi_enable_all(adpt); + ne6x_adpt_enable_irq(adpt); + + if ((adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) && adpt->netdev) { + ne6x_print_link_message(adpt, true); + netif_tx_start_all_queues(adpt->netdev); + netif_carrier_on(adpt->netdev); + } + + /* On the next run of the service_task, notify any clients of the new + * opened netdev + */ + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); + ne6x_linkscan_schedule(pf); + + return 0; +} + +static void ne6x_napi_disable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_disable(&q_vector->napi); + } +} + +static void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static void 
ne6x_clean_tx_ring(struct ne6x_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buf) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + ne6x_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_buf[i]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +static void ne6x_clean_rx_ring(struct ne6x_ring *rx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buf) + return; + + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[i]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; +} + +static void ne6x_clean_cq_ring(struct ne6x_ring *cq_ring) +{ + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + + cq_ring->next_to_clean = 0; + cq_ring->next_to_use = 0; +} + +void ne6x_down(struct ne6x_adapter *adpt) +{ + int i; + + /* It is assumed that the caller of this function + * sets the adpt->comm.state NE6X_ADPT_DOWN bit. 
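+	 * (with that bit set, the netdev and PF stats paths skip this adapter while its rings are cleaned below)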
+ */ + if (adpt->netdev) { + netif_carrier_off(adpt->netdev); + netif_tx_disable(adpt->netdev); + } + + ne6x_adpt_disable_irq(adpt); + ne6x_adpt_restart_vp(adpt, true); + ne6x_force_link_state(adpt, false); + ne6x_napi_disable_all(adpt); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_clean_tx_ring(adpt->tx_rings[i]); + ne6x_clean_cq_ring(adpt->cq_rings[i]); + ne6x_clean_rx_ring(adpt->rx_rings[i]); + } +} + +static void ne6x_free_rx_resources(struct ne6x_ring *rx_ring) +{ + ne6x_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_rx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (!adpt->rx_rings) + return; + + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->rx_rings[i] && adpt->rx_rings[i]->desc) + ne6x_free_rx_resources(adpt->rx_rings[i]); + } +} + +static void ne6x_free_tx_resources(struct ne6x_ring *tx_ring) +{ + ne6x_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +static void ne6x_free_cq_resources(struct ne6x_ring *cq_ring) +{ + ne6x_clean_cq_ring(cq_ring); + if (cq_ring->desc) { + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_tx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tx_rings[i] && adpt->tx_rings[i]->desc) + ne6x_free_tx_resources(adpt->tx_rings[i]); + kfree(adpt->tx_rings[i]->sgl); + } + } + + if (adpt->cq_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->cq_rings[i] && adpt->cq_rings[i]->desc) + ne6x_free_cq_resources(adpt->cq_rings[i]); + } + } + + if (adpt->tg_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tg_rings[i] && adpt->tg_rings[i]->desc) + /* tg_ring == cq_ring */ + ne6x_free_cq_resources(adpt->tg_rings[i]); + } + } +} + +int ne6x_up(struct ne6x_adapter *adpt) +{ + int err; + + ne6x_force_link_state(adpt, true); + + err = ne6x_adpt_configure(adpt); + if (!err) + err = ne6x_up_complete(adpt); + + return err; +} + +int ne6x_adpt_open(struct ne6x_adapter *adpt) +{ + char int_name[NE6X_INT_NAME_STR_LEN]; + struct ne6x_pf *pf = adpt->back; + int err; + + /* allocate descriptors */ + err = ne6x_adpt_setup_tx_resources(adpt); + if (err) + goto err_setup_tx; + + err = ne6x_adpt_setup_rx_resources(adpt); + if (err) + goto err_setup_rx; + + err = ne6x_adpt_configure(adpt); + if (err) + goto err_setup_rx; + + if (adpt->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), + adpt->netdev->name); + err = ne6x_adpt_request_irq(adpt, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + + /* When reducing the number of Tx queues, any pre-existing + * skbuffs might target a now removed queue. Older versions of + * the Linux kernel do not check for this, and it can result + * in a kernel panic. Avoid this by flushing all skbs now, so + * that we avoid attempting to transmit one that has an + * invalid queue mapping. 
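+		 * (the qdisc_reset_all_tx_gt() call below purges such skbs from the Tx qdiscs)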
+ */ + qdisc_reset_all_tx_gt(adpt->netdev, 0); + + err = netif_set_real_num_rx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = ne6x_up_complete(adpt); + if (err) + goto err_up_complete; + + ne6x_dev_set_tx_rx_state(adpt, true, true); + return 0; + +err_up_complete: + ne6x_down(adpt); +err_set_queues: + ne6x_adpt_free_irq(adpt); +err_setup_rx: + ne6x_adpt_free_rx_resources(adpt); +err_setup_tx: + ne6x_adpt_free_tx_resources(adpt); + + return err; +} + +int ne6x_open(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + set_bit(NE6X_ADPT_OPEN, adpt->comm.state); + + netif_carrier_off(netdev); + + if (ne6x_force_link_state(adpt, true)) + return -EAGAIN; + + err = ne6x_adpt_open(adpt); + if (err) + return err; + + ne6x_sync_features(netdev); + + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_UP); + + return 0; +} + +void ne6x_adpt_close(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_dev_set_tx_rx_state(adpt, false, false); + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_down(adpt); + + ne6x_adpt_free_irq(adpt); + ne6x_adpt_free_tx_resources(adpt); + ne6x_adpt_free_rx_resources(adpt); + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); +} + +int ne6x_close(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + clear_bit(NE6X_ADPT_OPEN, adpt->comm.state); + adpt->current_isup = false; + adpt->current_speed = NE6X_LINK_SPEED_UNKNOWN; + ne6x_adpt_close(adpt); + if (test_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags)) + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_DOWN); + + netdev_info(netdev, "close !!!\n"); + + return 0; +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) + usleep_range(1000, 2000); + + ne6x_down(adpt); + ne6x_up(adpt); + clear_bit(NE6X_CONFIG_BUSY, pf->state); +} + +static int ne6x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + NE6X_PACKET_HDR_PAD; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + if (adpt->back->num_alloc_vfs == 0) + ne6x_adpt_reinit_locked(adpt); + } + + return 0; +} + +static void ne6x_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + unsigned int hung_queue = 0; + u64 head, intr, tail; + + hung_queue = txqueue; + tx_ring = adpt->tx_rings[hung_queue]; + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + /* don't kick off another recovery if one is already pending */ + if 
(test_and_set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + + if (tx_ring) { + if (tx_ring->reg_idx < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_SQ_HD_POINTER)); + /* Read interrupt register */ + intr = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_VP_INT)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, + NE6X_SQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + intr = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + } + + netdev_info(netdev, "tx_timeout: adapter: %u, Q: %u, NTC: 0x%x, HEAD: 0x%llx, NTU: 0x%x, TAIL: 0x%llx, INTR: 0x%llx\n", + adpt->idx, hung_queue, tx_ring->next_to_clean, head, + tx_ring->next_to_use, tail, intr); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + set_bit(NE6X_PF_RESET_REQUESTED, pf->state); + set_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + break; + case 2: + set_bit(NE6X_CORE_RESET_REQUESTED, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(NE6X_DOWN_REQUESTED, pf->state); + set_bit(NE6X_ADPT_DOWN_REQUESTED, adpt->comm.state); + break; + } + + ne6x_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +static void ne6x_get_netdev_stats_struct_tx(struct ne6x_ring *ring, struct rtnl_link_stats64 *stats) +{ + u64 bytes, packets; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; +} + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt) +{ + return &adpt->net_stats; +} + +static void ne6x_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct rtnl_link_stats64 *adpt_stats = ne6x_get_adpt_stats_struct(adpt); + struct ne6x_ring *tx_ring, *rx_ring; + u64 bytes, packets; + unsigned int start; + int i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + if (!adpt->tx_rings) + return; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + tx_ring = READ_ONCE(adpt->tx_rings[i]); + if (!tx_ring) + continue; + + ne6x_get_netdev_stats_struct_tx(tx_ring, stats); + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + adpt_stats->rx_dropped = 0; + rcu_read_unlock(); + + /* following stats updated by ne6x_watchdog_subtask() */ + stats->multicast = adpt_stats->multicast; + stats->tx_errors = adpt_stats->tx_errors; + stats->tx_dropped = adpt_stats->tx_dropped; + stats->rx_errors = adpt_stats->rx_errors; + stats->rx_dropped = adpt_stats->rx_dropped; + stats->rx_crc_errors = adpt_stats->rx_crc_errors; + stats->rx_length_errors = 
adpt_stats->rx_length_errors; +} + +void ne6x_update_pf_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct ne6x_eth_stats *oes; + struct ne6x_eth_stats *es; /* device's eth stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u32 tx_restart, tx_busy; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + struct vf_stat vf_stat; + u64 tx_linearize; + u64 tx_force_wb; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u64 tx_e, rx_e; + u64 rx_l, rx_c; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + ons = &adpt->net_stats_offsets; + es = &adpt->eth_stats; + oes = &adpt->eth_stats_offsets; + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + rx_e = 0; + tx_e = 0; + rx_c = 0; + rx_l = 0; + tx_force_wb = 0; + tx_linearize = 0; + tx_busy = 0; + tx_restart = 0; + rx_page = 0; + rx_buf = 0; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + /* locate Tx ring */ + tx_ring = READ_ONCE(adpt->tx_rings[i]); + + do { + start = u64_stats_fetch_begin(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); + + tx_b += bytes; + tx_p += packets; + tx_restart += tx_ring->tx_stats.restart_q; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_linearize += tx_ring->tx_stats.tx_linearize; + tx_e += tx_ring->tx_stats.csum_err + tx_ring->tx_stats.tx_drop_addr + + tx_ring->tx_stats.tx_pcie_read_err; + + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + rx_b += bytes; + rx_p += packets; + rx_buf += rx_ring->rx_stats.alloc_buf_failed; + rx_page += rx_ring->rx_stats.alloc_page_failed; + rx_e += rx_ring->rx_stats.csum_err + rx_ring->rx_stats.rx_err + + rx_ring->rx_stats.rx_mem_error; + rx_l += rx_ring->rx_stats.rx_mem_error; + } + + rcu_read_unlock(); + + adpt->tx_restart = tx_restart; + adpt->tx_busy = tx_busy; + adpt->rx_page_failed = rx_page; + adpt->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + ns->tx_errors = tx_e; + ns->rx_errors = rx_e; + ns->rx_length_errors = rx_l; + ns->rx_crc_errors = rx_c; + + ns->rx_dropped = 0; + ne6x_dev_get_vf_stat(adpt, &vf_stat); + es->rx_broadcast = vf_stat.rx_broadcast_pkts; + es->rx_miss = vf_stat.rx_drop_pkts; + es->rx_multicast = vf_stat.rx_multicast_pkts; + es->rx_unicast = vf_stat.rx_unicast_pkts; + es->tx_broadcast = vf_stat.tx_broadcast_pkts; + es->tx_multicast = vf_stat.tx_multicast_pkts; + es->tx_unicast = vf_stat.tx_unicast_pkts; + es->rx_malform = vf_stat.rx_malform_pkts; + es->tx_malform = vf_stat.tx_malform_pkts; +} + +static void ne6x_netpoll(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_msix_clean_rings(0, adpt->q_vectors[i]); +} + +static int ne6x_set_mac(struct net_device *netdev, void *p) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_mac_info *mac = &adpt->port_info->mac; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + 
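+		/* requested MAC matches the one already in use; skip reprogramming */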
netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + if (ether_addr_equal(mac->perm_addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", mac->perm_addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + ne6x_adpt_del_mac(adpt, mac->perm_addr, true); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(mac->perm_addr, addr->sa_data, netdev->addr_len); + ne6x_adpt_add_mac(adpt, mac->perm_addr, true); + ne6x_dev_set_port_mac(adpt, mac->perm_addr); + + return 0; +} + +static int ne6x_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + + if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + return -EINVAL; + } + + return ret; +} + +static int ne6x_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Make sure VLAN delete is successful before updating VLAN + * information + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) + return ret; + + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + + return 0; +} + +static struct mac_addr_node *ne6x_find_addr(struct ne6x_adapter *adpt, + const u8 *macaddr, bool is_unicast) +{ + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (!macaddr) + return NULL; + + if (is_unicast) + addr_head = &adpt->uc_mac_addr; + else + addr_head = &adpt->mc_mac_addr; + + list_for_each_entry(addr_node, &addr_head->list, list) { + if (ether_addr_equal(macaddr, addr_node->addr)) + return addr_node; + } + + return NULL; +} + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + int rc = 0; + + if (!addr) + return -EINVAL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_multicast; + } + + mutex_lock(&addr_head->mutex); + + if (ne6x_find_addr(adpt, addr, is_unicast)) + goto out_unlock; + + /* Update MAC list value */ + addr_node = kzalloc(sizeof(*addr_node), GFP_KERNEL); + if (!addr_node) { + rc = -ENOMEM; + goto out_unlock; + } + + ether_addr_copy(addr_node->addr, addr); + list_add_tail(&addr_node->list, &addr_head->list); + /* Send the value of the updated MAC linked list to the SDK */ + ne6x_vc_cfg_mac(adpt, addr_node->addr); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return rc; +} + +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (is_unicast) 
{ + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_multicast; + } + + mutex_lock(&addr_head->mutex); + addr_node = ne6x_find_addr(adpt, addr, is_unicast); + if (!addr_node) + goto out_unlock; + + list_del(&addr_node->list); + ne6x_vc_cfg_mac(adpt, addr_node->addr); + kfree(addr_node); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return 0; +} + +static int ne6x_mc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, false); +} + +static int ne6x_mc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, false); +} + +static int ne6x_uc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, true); +} + +static int ne6x_uc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, true); +} + +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); +} + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt) +{ + struct mac_addr_node *temp_node = NULL, *addr_node = NULL; + struct ne6x_vlan_filter *f = NULL, *temp_filter = NULL; + struct mac_addr_head *addr_head = NULL; + struct list_head temp_header; + int ret = 0; + + INIT_LIST_HEAD(&temp_header); + spin_lock_bh(&adpt->mac_vlan_list_lock); + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid) { + temp_filter = kzalloc(sizeof(*temp_filter), GFP_ATOMIC); + memcpy(temp_filter, f, sizeof(struct ne6x_vlan_filter)); + list_add_tail(&temp_filter->list, &temp_header); + } + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + list_for_each_entry_safe(f, temp_filter, &temp_header, list) { + if (f->vlan.vid) + ret |= ne6x_adpt_del_vlan(adpt, f->vlan); + + list_del(&f->list); + kfree(f); + } + + addr_head = &adpt->uc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_unicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + addr_head = &adpt->mc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_multicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + return ret; +} + +static void ne6x_set_rx_mode_task(struct work_struct *work) +{ + struct ne6x_adapter *adpt = container_of(work, struct ne6x_adapter, set_rx_mode_task); + struct net_device *netdev = adpt->netdev; + + /* Check for Promiscuous modes */ + if (netdev->flags & IFF_PROMISC) { + ne6x_dev_set_uc_promiscuous_enable(adpt, true); + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + } else { + ne6x_dev_set_uc_promiscuous_enable(adpt, false); + ne6x_dev_set_mc_promiscuous_enable(adpt, false); + /* Check for All Multicast modes */ + if (netdev->flags & IFF_ALLMULTI) + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + else + __dev_mc_sync(netdev, ne6x_mc_addr_sync, ne6x_mc_addr_unsync); + } + + 
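+	/* unicast addresses are always synced, independent of the promiscuous handling above */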
__dev_uc_sync(netdev, ne6x_uc_addr_sync, ne6x_uc_addr_unsync); +} + +static void ne6x_set_rx_mode(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return; + + queue_work(ne6x_wq, &adpt->set_rx_mode_task); +} + +static int ne6x_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return -1; + + return 0; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static netdev_features_t ne6x_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +static int ne6x_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. 
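+		 * (all of the flag updates below are committed in a single ne6x_dev_set_features() call at the end)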
+ */ + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + if (changed & NETIF_F_NTUPLE) { + if (features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + } + return ne6x_dev_set_features(adpt, value); +} + +static netdev_features_t ne6x_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. 
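+	 * (enforced below by clearing NETIF_F_GSO_MASK when gso_size is under 64)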
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static int ne6x_link_speed_to_rate(int link_speed) +{ + switch (link_speed) { + case NE6X_LINK_SPEED_100GB: + return SPEED_100000; + case NE6X_LINK_SPEED_40GB: + return SPEED_40000; + case NE6X_LINK_SPEED_25GB: + return SPEED_25000; + case NE6X_LINK_SPEED_10GB: + return SPEED_10000; + default: + return SPEED_25000; + } +} + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate) +{ + if (!adpt) + return -EINVAL; + + if (min_tx_rate) { + dev_err(&adpt->back->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + if (max_tx_rate > ne6x_link_speed_to_rate(adpt->port_info->phy.link_info.link_speed)) { + dev_err(&adpt->back->pdev->dev, "Invalid max tx rate (%d) (greater than link_speed) specified for VF %d.\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + return 0; +} + +static struct ne6x_key_filter *ne6x_find_key(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + list_for_each_entry(f, &pf->key_filter_list, list) { + if (f->key.pi == key.pi && ether_addr_equal(f->key.mac_addr, key.mac_addr)) + return f; + } + + return NULL; +} + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f = NULL; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->key = key; + + list_add_tail(&f->list, &pf->key_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&pf->key_list_lock); + + return f; +} + +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&pf->key_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&pf->key_list_lock); + + return 0; +} + +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key_filter *f; + struct ne6x_key key; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + f = ne6x_add_key_list(adpt->back, key); + if (f->refcnt) + return -1; + + return 0; +} + +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key key; + int ret; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + ret = ne6x_del_key_list(adpt->back, key); + if (ret) + return -1; + + return 0; +} + +static struct ne6x_vlan_filter *ne6x_find_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == 
vlan.tpid) + return f; + } + + return NULL; +} + +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = NULL; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adpt->vlan_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return f; +} + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&adpt->mac_vlan_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return 0; +} + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = ne6x_add_vlan_list(adpt, vlan); + + if (f->refcnt == 0) + ne6x_dev_vlan_add(adpt, &vlan); + + return 0; +} + +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + int ret; + + ret = ne6x_del_vlan_list(adpt, vlan); + if (ret == 0) + ne6x_dev_vlan_del(adpt, &vlan); + + return 0; +} + +static int ne6x_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + u16 local_vlan_proto = ntohs(vlan_proto); + u16 vid_temp = 0, tpid_temp = 0; + struct ne6x_vlan vlan; + struct ne6x_adapter *adpt; + struct device *dev; + struct ne6x_vf *vf; + int lport; + + dev = ne6x_pf_to_dev(pf); + + if (vf_id < 0 || vf_id >= pf->num_alloc_vfs / 2 || vlan_id >= (VLAN_N_VID - 1) || qos > 7) { + dev_err(dev, "Invalid Port VLAN parameters for VF %d,vlan ID %d, QoS %d\n", + vf_id, vlan_id, qos); + return -EINVAL; + } + + if (!ne6x_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { + dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", + local_vlan_proto); + return -EPROTONOSUPPORT; + } + + lport = ADPT_LPORT(np->adpt); + vf_id += (pf->num_alloc_vfs / 2) * lport; + + vf = ne6x_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + vf->port_vlan_info = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + if (vf->port_vlan_info.prio || vf->port_vlan_info.vid) + dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", + vlan_id, qos, local_vlan_proto, vf_id); + else + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); + + adpt = vf->adpt; + + dev_info(dev, "%s: net_name:%s TPID:%08x vlan_id:%d qos:%d lport:%d vport:%d vlan_id:%d tpid:%04x %d\n", + __func__, netdev->name, local_vlan_proto, vlan_id, qos, ADPT_LPORT(adpt), + ADPT_VPORT(adpt), vf->port_vlan_info.vid, vf->port_vlan_info.tpid, vf->vfp_vid); + + vlan = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + + if (vlan.vid == 0) { + if (vf->vfp_tpid == vlan.tpid) { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } else { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + } else if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + vid_temp = vlan.vid; + tpid_temp = vlan.tpid; + vlan.vid = 
vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + + if (vf->vfp_vid == vid_temp) { + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + vlan.vid = vid_temp; + vlan.tpid = tpid_temp; + vid_temp = (qos << VLAN_PRIO_SHIFT) | (vlan.vid & VLAN_VID_MASK); + vf->vfp_vid = vf->port_vlan_info.vid; + vf->vfp_tpid = vf->port_vlan_info.tpid; + ne6x_dev_add_vf_qinq(vf, tpid_temp, vid_temp); + ne6x_adpt_add_vlan(vf->adpt, vlan); + } else { + return -EINVAL; + } + + return 0; +} + +static void *ne6x_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(ne6x_pf_to_dev(adpt->back), sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + ne6x_adpt_add_mac(adpt, mac, true); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adpt->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +static void ne6x_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!accel_priv) + return; + + ne6x_adpt_del_mac(adpt, mv->mac, true); + list_del(&mv->list); + devm_kfree(ne6x_pf_to_dev(adpt->back), mv); + + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); +} + +static const struct net_device_ops ne6x_netdev_ops = { + .ndo_open = ne6x_open, + .ndo_stop = ne6x_close, + .ndo_start_xmit = ne6x_lan_xmit_frame, + .ndo_get_stats64 = ne6x_get_netdev_stats_struct, + .ndo_set_rx_mode = ne6x_set_rx_mode, + .ndo_set_mac_address = ne6x_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ne6x_change_mtu, + .ndo_tx_timeout = ne6x_tx_timeout, + .ndo_poll_controller = ne6x_netpoll, + .ndo_set_vf_rate = ne6x_ndo_set_vf_bw, + .ndo_set_tx_maxrate = ne6x_set_tx_maxrate, + .ndo_set_vf_mac = ne6x_set_vf_mac, + .ndo_get_vf_config = ne6x_get_vf_config, + .ndo_set_vf_trust = ne6x_set_vf_trust, + .ndo_set_vf_vlan = ne6x_set_vf_port_vlan, + .ndo_set_vf_link_state = ne6x_set_vf_link_state, + .ndo_vlan_rx_add_vid = ne6x_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6x_vlan_rx_kill_vid, + .ndo_set_features = ne6x_set_features, + .ndo_features_check = ne6x_features_check, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = ne6x_rx_flow_steer, +#endif + .ndo_tx_timeout = ne6x_tx_timeout, + .ndo_dfwd_add_station = ne6x_fwd_add_macvlan, + .ndo_dfwd_del_station = ne6x_fwd_del_macvlan, + .ndo_fix_features = ne6x_fix_features, + .ndo_set_features = ne6x_set_features, +}; + +void ne6x_sync_features(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value &= ~NE6X_F_TX_QINQ; + + if 
(netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + + if (netdev->features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (netdev->features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + + if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + + if (netdev->features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + + if (netdev->features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + + ne6x_dev_set_features(adpt, value); +} + +static void ne6x_set_netdev_features(struct net_device *netdev) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + netdev_features_t vlano_features = 0u; + netdev_features_t csumo_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + pf->hw.dvm_ena = 0x1; + + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; +} + +static int ne6x_config_netdev(struct ne6x_adapter *adpt) +{ + struct ne6x_rss_info *rss_info = &adpt->rss_info; + struct ne6x_pf *pf = adpt->back; + struct ne6x_netdev_priv *np; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int etherdev_size, index; + u8 mac_addr[ETH_ALEN]; + + if (pf->hw.bus.domain_num) + sprintf(name, "enP%dp%ds0f%d", + pf->hw.bus.domain_num, pf->hw.bus.bus_num, adpt->idx); + else + sprintf(name, "enp%ds0f%d", pf->hw.bus.bus_num, adpt->idx); + + etherdev_size = sizeof(struct 
ne6x_netdev_priv); + + netdev = alloc_netdev_mq(etherdev_size, name, NET_NAME_USER, ether_setup, adpt->num_queue); + if (!netdev) + return -ENOMEM; + + adpt->netdev = netdev; + np = netdev_priv(netdev); + np->adpt = adpt; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + netdev_rss_key_fill(rss_info->hash_key, sizeof(rss_info->hash_key)); + + for (index = 0; index < rss_info->ind_table_size; index++) + rss_info->ind_table[index] = ethtool_rxfh_indir_default(index, adpt->num_queue); + + ne6x_dev_set_rss(adpt, rss_info); /* end rss info */ + + ne6x_set_netdev_features(netdev); + + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, adpt->port_info->mac.perm_addr); + eth_hw_addr_set(netdev, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + + netdev->netdev_ops = &ne6x_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + ne6x_set_ethtool_ops(netdev); + +/* MTU range: 128 - 15342 */ + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - NE6X_PACKET_HDR_PAD - ETH_FCS_LEN; + netdev->gso_max_size = 65535; + netdev->needed_headroom = 32; + netdev->needed_tailroom = 32; + ne6x_dev_set_mtu(adpt, netdev->mtu); + ne6x_sync_features(netdev); + + return 0; +} + +static void ne6x_map_vector_to_qp(struct ne6x_adapter *adpt, int v_idx, int qp_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = adpt->tx_rings[qp_idx]; + struct ne6x_ring *rx_ring = adpt->rx_rings[qp_idx]; + struct ne6x_ring *cq_ring = adpt->cq_rings[qp_idx]; + struct ne6x_ring *tg_ring = adpt->tg_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; + tg_ring->q_vector = q_vector; + tg_ring->next = q_vector->cq.ring; + q_vector->tg.ring = tg_ring; + q_vector->tg.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt) +{ + int q_vectors = adpt->num_q_vectors; + int qp_remaining = adpt->num_queue; + struct ne6x_q_vector *q_vector; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. + * It is also important to go through all the vectors available to be + * sure that if we don't use all the vectors, that the remaining vectors + * are cleared. This is especially important when decreasing the + * number of queues in use. 
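+	 * Example: 8 queue pairs on 3 vectors map as 3/3/2 ring pairs per vector, taking DIV_ROUND_UP of the remainder at each step.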
+ */ + for (; v_start < q_vectors; v_start++) { + q_vector = adpt->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + q_vector->reg_idx = q_vector->v_idx + adpt->base_vector; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->cq.count = 0; + q_vector->tg.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->cq.ring = NULL; + q_vector->tg.ring = NULL; + + while (num_ringpairs--) { + ne6x_map_vector_to_qp(adpt, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!adpt) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + memset(ns, 0, sizeof(*ns)); + memset(&adpt->net_stats_offsets, 0, sizeof(adpt->net_stats_offsets)); + memset(&adpt->eth_stats, 0, sizeof(adpt->eth_stats)); + memset(&adpt->eth_stats_offsets, 0, sizeof(adpt->eth_stats_offsets)); + + if (adpt->rx_rings && adpt->rx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + memset(&adpt->rx_rings[i]->stats, 0, + sizeof(adpt->rx_rings[i]->stats)); + memset(&adpt->rx_rings[i]->rx_stats, 0, + sizeof(adpt->rx_rings[i]->rx_stats)); + memset(&adpt->rx_rings[i]->cq_stats, 0, + sizeof(adpt->rx_rings[i]->cq_stats)); + memset(&adpt->tx_rings[i]->stats, 0, + sizeof(adpt->tx_rings[i]->stats)); + memset(&adpt->tx_rings[i]->tx_stats, 0, + sizeof(adpt->tx_rings[i]->tx_stats)); + } + } +} + +static int ne6x_adpt_setup(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt = NULL; + u32 is_write_proterct = false; + struct ne6x_hw *hw = &pf->hw; + int i, ret = 0; + u32 value; + + /* PF + VP */ + pf->adpt = kcalloc(NE6X_MAX_VP_NUM + 4, sizeof(*pf->adpt), GFP_KERNEL); + if (!pf->adpt) + return -ENOMEM; + + ne6x_dev_get_norflash_write_protect(pf, &is_write_proterct); + + /* Need to protect the allocation of the adapters at the PF level */ + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + struct ne6x_vlan vlan = {0}; + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + pf->adpt[i] = adpt; + adpt->idx = i; + adpt->vport = NE6X_PF_VP0_NUM + i; /*vport*/ + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + clear_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags); + clear_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags); + clear_bit(NE6X_ADPT_F_ACL, adpt->flags); + + if (is_write_proterct) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + INIT_WORK(&adpt->set_rx_mode_task, ne6x_set_rx_mode_task); + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + INIT_LIST_HEAD(&adpt->macvlan_list); + init_waitqueue_head(&adpt->recv_notify); + + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) { + ret = -ENOMEM; + goto err_portinfo; + } + + adpt->port_info->lport = i; /* logical port */ + adpt->port_info->hw_trunk_id = i; + adpt->port_info->hw_port_id = ne6x_dev_get_pport(adpt); + 
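+		/* each logical port gets a block of hardware queues: length pf->hw.max_queue, starting at expect_vp * lport (set below) */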
adpt->port_info->queue = pf->hw.max_queue; + adpt->port_info->hw_max_queue = adpt->port_info->queue; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * i; + adpt->comm.port_info = adpt->port_info->lport | (adpt->vport << 8); + adpt->port_info->hw = hw; + adpt->port_info->phy.curr_user_speed_req = 0x0; + + ne6x_dev_get_mac_addr(adpt, adpt->port_info->mac.perm_addr); + ne6x_set_num_rings_in_adpt(adpt); + + ret = ne6x_adpt_mem_alloc(pf, adpt); + if (ret) + goto err_netdev; + + ret = ne6x_config_netdev(adpt); + if (ret) + goto err_configdev; + + /* The unicast MAC address delivers the SDK */ + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(adpt, vlan); + ne6x_adpt_add_mac(adpt, adpt->port_info->mac.perm_addr, true); + ne6x_dev_add_broadcast_leaf(adpt); + + /* set up vectors and rings if needed */ + ret = ne6x_adpt_setup_vectors(adpt); + if (ret) + goto err_msix; + + ret = ne6x_alloc_rings(adpt); + if (ret) + goto err_rings; + + ne6x_init_arfs(adpt); + + ret = ne6x_set_cpu_rx_rmap(adpt); + if (ret) + netdev_info(adpt->netdev, "adpt rx rmap err: %d", ret); + + /* map all of the rings to the q_vectors */ + ne6x_adpt_map_rings_to_vectors(adpt); + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_port2pi(adpt); + ne6x_dev_set_pi2port(adpt); + ne6x_dev_set_vport(adpt); + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + adpt = pf->adpt[i]; + ret = ne6x_adpt_register_netdev(adpt); + if (ret) + goto err_configdev; + + adpt->netdev_registered = true; + netif_carrier_off(adpt->netdev); + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(adpt->netdev); + } + + return ret; + +err_rings: + ne6x_adpt_free_q_vectors(adpt); +err_msix: + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + unregister_netdev(adpt->netdev); + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } +err_configdev: + kfree(adpt->tx_rings); + kfree(adpt->q_vectors); +err_netdev: + kfree(adpt->port_info); +err_portinfo: + kfree(adpt); + + return ret; +} + +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt) +{ + int ret; + + ret = register_netdev(adpt->netdev); + if (ret) { + struct net_device *device = adpt->netdev; + struct ne6x_pf *pf = adpt->back; + char name[IFNAMSIZ] = {0}; + + sprintf(name, "enp%ds0f%%d", pf->hw.bus.bus_num); + strcpy(device->name, name); + return register_netdev(adpt->netdev); + } + + return ret; +} + +static void ne6x_adjust_adpt_port_max_queue(struct ne6x_pf *pf) +{ + int cpu_num = num_online_cpus(); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + pf->hw.expect_vp = pf->irq_pile->num_entries / pf->hw.pf_port; + /* actal max vp queue */ + pf->hw.max_queue = min_t(int, cpu_num, pf->hw.expect_vp); + dev_info(&pf->pdev->dev, "%s:hw->expect_vp = %d hw->max_queue = %d cpu_num = %d\n", + __func__, pf->hw.expect_vp, pf->hw.max_queue, cpu_num); + } +} + +static int ne6x_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6x_pf *pf; + struct ne6x_hw *hw; + u32 ioremap_len; + int err; + + if (PCI_FUNC(pdev->devfn) != 1) + return 0; + + /* initialize device for use with memory space */ + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_mem_regions(pdev, ne6x_driver_name); + 
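+	/* BAR0, BAR2 and BAR4 are ioremapped individually further below once the regions are reserved */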
if (err) { + dev_info(&pdev->dev, "pci_request_mem_regions failed %d\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. + */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_adpt = 0; + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(NE6X_DOWN, pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = pci_resource_len(pdev, 0); + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr0) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar0 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), ioremap_len, err); + goto err_ioremap_hw_addr0; + } + + ioremap_len = pci_resource_len(pdev, 2); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), ioremap_len); + if (!hw->hw_addr2) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar2 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 2), ioremap_len, err); + goto err_ioremap_hw_addr2; + } + + ioremap_len = pci_resource_len(pdev, 4); + hw->hw_addr4 = ioremap(pci_resource_start(pdev, 4), ioremap_len); + if (!hw->hw_addr4) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar4 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 4), ioremap_len, err); + goto err_ioremap_hw_addr4; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->bus.domain_num = pci_domain_nr(pdev->bus); + hw->bus.bus_num = pdev->bus->number; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + usleep_range(10, 20); + + mutex_init(&pf->mbus_comm_mutex); + if (ne6x_dev_init(pf)) { + err = -EIO; + dev_info(&pdev->dev, "sdk init failed!\n"); + goto error_sdk_init_failed; + } + usleep_range(10, 20); + + pci_save_state(pdev); + + /* hardware resource initialization */ + err = ne6x_hw_init(hw); + if (err) + goto err_unroll_alloc; + + /* driver private resource initialization */ + err = ne6x_pf_init(pf); + if (err) + goto err_pf_reset; + + /* interrupt resource initialization */ + err = ne6x_init_interrupt_scheme(pf); + if (err) + goto err_interrupt_scheme; + + ne6x_adjust_adpt_port_max_queue(pf); + + err = ne6x_adpt_setup(pf); + if (err) + goto err_adpts; + + ne6x_dev_set_nic_start(pf, 0); + add_timer(&pf->linkscan_tmr); + ne6x_enable_link_irq(pf); + pcie_print_link_status(pdev); + /* ready to go, so clear down state bit */ + clear_bit(NE6X_DOWN, pf->state); + return 0; + +err_adpts: + set_bit(NE6X_DOWN, pf->state); + ne6x_clear_interrupt_scheme(pf); +err_interrupt_scheme: + del_timer_sync(&pf->serv_tmr); +err_pf_reset: + devm_kfree(ne6x_hw_to_dev(hw), hw->port_info); + hw->port_info = NULL; +err_unroll_alloc: +error_sdk_init_failed: + iounmap(hw->hw_addr4); +err_ioremap_hw_addr4: + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; +err_ioremap_hw_addr2: + iounmap(hw->hw_addr0); +err_ioremap_hw_addr0: + kfree(pf); +err_pf_alloc: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(adpt->q_vectors); + 
adpt->q_vectors = NULL; + } + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + adpt->rx_rings = NULL; + adpt->cq_rings = NULL; +} + +static int ne6x_adpt_clear(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + /* updates the PF for this cleared adpt */ + ne6x_adpt_free_arrays(adpt, true); + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +static int ne6x_adpt_release(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_key_filter *klf, *klftmp; + struct ne6x_macvlan *mv, *mv_tmp; + struct ne6x_pf *pf = adpt->back; + + if (!test_bit(NE6X_DOWN, pf->state)) { + dev_info(&pf->pdev->dev, "Can't remove PF adapter\n"); + return -ENODEV; + } + + set_bit(NE6X_ADPT_RELEASING, adpt->comm.state); + + ne6x_remove_arfs(adpt); + ne6x_adpt_clear_ddos(pf); + ne6x_adpt_clear_mac_vlan(adpt); + ne6x_dev_del_broadcast_leaf(adpt); + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + spin_lock_bh(&adpt->back->key_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(klf, klftmp, &adpt->back->key_filter_list, list) { + list_del(&klf->list); + kfree(klf); + } + spin_unlock_bh(&adpt->back->key_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adpt->macvlan_list, list) + ne6x_fwd_del_macvlan(adpt->netdev, mv); + + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + if (adpt->netdev) + /* results in a call to i40e_close() */ + unregister_netdev(adpt->netdev); + } + + ne6x_free_cpu_rx_rmap(adpt); + ne6x_adpt_disable_irq(adpt); + + /* clear the sync flag on all filters */ + if (adpt->netdev) { + __dev_uc_unsync(adpt->netdev, NULL); + __dev_mc_unsync(adpt->netdev, NULL); + } + + ne6x_adpt_free_q_vectors(adpt); + if (adpt->netdev) { + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } + + /*add for lldp*/ + ne6x_dev_set_fw_lldp(adpt, false); + ne6x_adpt_clear_rings(adpt); + ne6x_adpt_clear(adpt); + + return 0; +} + +static void ne6x_remove(struct pci_dev *pdev) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_hw *hw = &pf->hw; + int i; + + if 
(PCI_FUNC(pdev->devfn) != 1) + return; + + ne6x_proc_pf_exit(pf); + ne6x_dbg_pf_exit(pf); + + ne6x_dev_set_nic_stop(pf, 0); + +#ifdef CONFIG_PCI_IOV + if (pf->num_alloc_vfs) { + set_bit(NE6X_REMOVE, pf->state); + ne6x_sriov_configure(pdev, 0); + } +#endif + + /* no more scheduling of any task */ + set_bit(NE6X_DOWN, pf->state); + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + if (pf->linkscan_tmr.function) + del_timer_sync(&pf->linkscan_tmr); + + if (pf->linkscan_work.func) + cancel_work_sync(&pf->linkscan_work); + + /* Now we can shutdown the PF's adapter, just before we kill + * adminq and hmc. + */ + for (i = 0; i < pf->num_alloc_adpt; i++) + ne6x_adpt_release(pf->adpt[i]); + + /* Clear all dynamic memory lists of rings, q_vectors, and adapters */ + rtnl_lock(); + ne6x_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) { + ne6x_adpt_clear_rings(pf->adpt[i]); + ne6x_adpt_clear(pf->adpt[i]); + pf->adpt[i] = NULL; + } + } + rtnl_unlock(); + + kfree(pf->adpt); + + iounmap(hw->hw_addr4); + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; + iounmap(hw->hw_addr0); + kfree(pf); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver ne6x_driver = { + .name = ne6x_driver_name, + .id_table = ne6x_pci_tbl, + .probe = ne6x_probe, + .remove = ne6x_remove, + .sriov_configure = ne6x_sriov_configure, +}; + +static int __init ne6x_init_module(void) +{ + pr_info("%s: %s - version %s\n", ne6x_driver_name, ne6x_driver_string, + ne6x_driver_version_str); + pr_info("%s: %s\n", ne6x_driver_name, ne6x_copyright); + + ne6x_wq = create_singlethread_workqueue(ne6x_driver_name); + if (!ne6x_wq) { + pr_err("%s: Failed to create workqueue\n", ne6x_driver_name); + return -ENOMEM; + } + + ne6x_dbg_init(); + ne6x_proc_init(); + ne6x_netlink_init(); + + return pci_register_driver(&ne6x_driver); +} + +module_init(ne6x_init_module); + +static void __exit ne6x_exit_module(void) +{ + pci_unregister_driver(&ne6x_driver); + destroy_workqueue(ne6x_wq); + ne6x_netlink_exit(); + ne6x_proc_exit(); + ne6x_dbg_exit(); +} + +module_exit(ne6x_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c new file mode 100644 index 00000000000000..9401675c8e126b --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_debugfs.h" +#include "ne6x_dev.h" +#include "ne6x_netlink.h" + +static struct sock *ne6x_nlsock; +static DEFINE_MUTEX(ne6x_msg_mutex); + +static int ne6x_netlink_tab_add(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + u32 table_id = 0xFFFFFFFF; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_search(pf, table_info->table, &table_info->data[0], + table_info->size, NULL, table_info->size); + if (err == -ENOENT) { + table_info->size = NE6X_HASH_KEY_SIZE + NE6X_HASH_DATA_SIZE; + err = ne6x_reg_table_insert(pf, table_info->table, &table_info->data[0], + table_info->size, &table_id); + } else { + dev_info(dev, "table exist\n"); + kfree(table_info); + return -EEXIST; + } + + if (err == 0) { + dev_info(dev, "insert rule_id = 0x%x success!\n", table_id); + } else if (err != -ETIMEDOUT) { + dev_info(dev, "insert rule_id = 0x%x fail!\n", table_id); + err = -EIO; + } else { + dev_info(dev, "insert rule_id = 0x%x timeout!\n", table_id); + err = EAGAIN; + } + + kfree(table_info); + return err; +} + +static int ne6x_netlink_tab_del(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_delete(pf, table_info->table, &table_info->data[0], table_info->size); + dev_info(dev, "%s: %s\n", __func__, (err == 0) ? "success!" : "timeout!"); + kfree(table_info); + + return err; +} + +static int ne6x_netlink_meter_write(struct ne6x_pf *pf, struct ne6x_meter *meter) +{ + struct meter_table vf_bw; + struct device *dev; + u32 cir_maxnum = 0xfffff; + u32 cbs_maxnum = 0xffffff; + u32 type_flag = 0; + u32 type_map = 0; + u32 cir; + int err; + + if (meter->type_num > NE6X_METER_TYPE_MAX || + meter->opcode > NE6X_METER_OPCODE_MAX) + return -EINVAL; + + dev = ne6x_pf_to_dev(pf); + type_flag |= BIT(meter->type_num); + + err = ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type_map); + if (err) + return err; + + if (meter->opcode) + type_map |= type_flag; + else + type_map &= ~type_flag; + + err = ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type_map); + if (err) + return err; + + cir = meter->value * 1000 + 1023; + cir = min(cir / 1024, cir_maxnum); + + vf_bw.cir = cir; + vf_bw.pir = min(cir + cir / 10, cir_maxnum); + + vf_bw.cbs = min(vf_bw.cir * 10000, cbs_maxnum); + vf_bw.pbs = min(vf_bw.pir * 10000, cbs_maxnum); + + err = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + meter->type_num, (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(dev, "%s\n", err ? "write meter fail!" 
: "write meter success!"); + + return err; +} + +static int ne6x_netlink_rcv_msg(struct nlmsghdr *nlh) +{ + char name[IFNAMSIZ] = {0}; + struct net_device *dev; + struct ne6x_pf *pf; + void *data; + int err; + + strncpy(name, nlmsg_data(nlh), IFNAMSIZ - 1); + dev = __dev_get_by_name(&init_net, name); + if (unlikely(!dev)) + return -ENODEV; + + if (unlikely(!netif_is_ne6x(dev))) + return -ENOTSUPP; + + pf = ne6x_netdev_to_pf(dev); + data = nlmsg_data(nlh) + IFNAMSIZ; + + switch (nlh->nlmsg_type) { + case NE6X_NLMSG_TAB_ADD: + /* if entry exists, treat it as insertion success */ + err = ne6x_netlink_tab_add(pf, data); + if (err == -EEXIST) + err = 0; + break; + case NE6X_NLMSG_TAB_DEL: + err = ne6x_netlink_tab_del(pf, data); + break; + case NE6X_NLMSG_METER_WRITE: + err = ne6x_netlink_meter_write(pf, data); + break; + default: + return -ENOTSUPP; + } + + return err; +} + +static void ne6x_netlink_ack(struct sk_buff *in_skb, unsigned long *status) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + size_t payload; + + payload = BITS_TO_LONGS(NE6X_RULE_BATCH_MAX) * sizeof(unsigned long); + skb_out = nlmsg_new(payload, GFP_KERNEL); + if (unlikely(!skb_out)) { + NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; + NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk); + return; + } + + nlh = nlmsg_put(skb_out, NETLINK_CB(in_skb).portid, 0, NLMSG_DONE, payload, 0); + if (unlikely(!nlh)) { + nlmsg_free(skb_out); + return; + } + + NETLINK_CB(skb_out).dst_group = 0; + bitmap_copy(nlmsg_data(nlh), status, NE6X_RULE_BATCH_MAX); + + nlmsg_unicast(in_skb->sk, skb_out, NETLINK_CB(in_skb).portid); +} + +static void ne6x_netlink_rcv(struct sk_buff *skb) +{ + DECLARE_BITMAP(status, NE6X_RULE_BATCH_MAX); + u32 idx = 0; + + bitmap_zero(status, NE6X_RULE_BATCH_MAX); + mutex_lock(&ne6x_msg_mutex); + while (skb->len >= nlmsg_total_size(0) && idx < NE6X_RULE_BATCH_MAX) { + struct nlmsghdr *nlh; + int msglen, err; + + nlh = nlmsg_hdr(skb); + + if (unlikely(nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)) { + set_bit(idx, status); + goto skip; + } + + err = ne6x_netlink_rcv_msg(nlh); + if (err) + set_bit(idx, status); + +skip: + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (unlikely(msglen > skb->len)) + msglen = skb->len; + + idx++; + skb_pull(skb, msglen); + } + + ne6x_netlink_ack(skb, status); + mutex_unlock(&ne6x_msg_mutex); +} + +/** + * ne6x_netlink_init - start up netlink resource for the driver + **/ +void ne6x_netlink_init(void) +{ + struct netlink_kernel_cfg ne6x_netlink_cfg = { + .input = ne6x_netlink_rcv, + }; + + ne6x_nlsock = netlink_kernel_create(&init_net, NE6X_NETLINK, &ne6x_netlink_cfg); + if (unlikely(!ne6x_nlsock)) + pr_warn("Init of netlink failed\n"); +} + +/** + * ne6x_netlink_exit - clean out the driver's netlink resource + **/ +void ne6x_netlink_exit(void) +{ + netlink_kernel_release(ne6x_nlsock); + ne6x_nlsock = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h new file mode 100644 index 00000000000000..61a6cd1347bde0 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_NETLINK_H +#define _NE6X_NETLINK_H + +#define NE6X_NETLINK 31 +#define NE6X_HASH_KEY_SIZE 64 +#define NE6X_HASH_DATA_SIZE 64 +#define NE6X_RULE_BATCH_MAX 64 +#define NE6X_METER_TYPE_MAX 8 +#define NE6X_METER_OPCODE_MAX 1 +#define NE6X_ADDR_LEN 16 + +/* netlink message opcodes */ +enum { + NE6X_NLMSG_BASE = 0x10, /* the type < 0x10 is reserved for control messages */ + NE6X_NLMSG_TAB_ADD = NE6X_NLMSG_BASE, + NE6X_NLMSG_TAB_DEL, + NE6X_NLMSG_METER_WRITE, + NE6X_NLMSG_MAX +}; + +struct ne6x_rule { + u8 dst[NE6X_ADDR_LEN]; + u8 src[NE6X_ADDR_LEN]; + u32 proto; +} __packed; + +struct ne6x_meter { + u8 type_num; + u8 opcode; + u32 value; +} __packed; + +void ne6x_netlink_init(void); +void ne6x_netlink_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h new file mode 100644 index 00000000000000..b60470095d9902 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_PORTMAP_H +#define _NE6X_PORTMAP_H + +#include +#include + +#define PBMP_DWORD_NUM 4 +#define PBMP_WORD_WIDTH 32 + +typedef u32 pbmp_t[PBMP_DWORD_NUM]; + +#define SET_BIT(DAT, POS) ((DAT) |= ((u32)0x1 << (POS))) +#define CLR_BIT(DAT, POS) ((DAT) &= (~((u32)0x01 << (POS)))) + +#define PBMP_DWORD_GET(bm, word) ((bm)[(word)]) +#define PBMP_CLEAR(bm) \ + (PBMP_DWORD_GET(bm, 0) = PBMP_DWORD_GET(bm, 1) = \ + PBMP_DWORD_GET(bm, 2) = \ + PBMP_DWORD_GET(bm, 3) = 0) + +#define PBMP_WNET(port) ((port) / PBMP_WORD_WIDTH) +#define PBMP_WBIT(port) (1LU << ((port) % PBMP_WORD_WIDTH)) + +#define PBMP_ENTRY(bm, port) \ + (PBMP_DWORD_GET(bm, PBMP_WNET(port))) + +#define PBMP_PORT_REMOVE(bm, port) \ + (PBMP_ENTRY(bm, port) &= ~(PBMP_WBIT(port))) + +#define PBMP_PORT_ADD(bm, port) \ + (PBMP_ENTRY(bm, port) |= PBMP_WBIT(port)) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c new file mode 100644 index 00000000000000..743ab28e803c13 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" + +static struct proc_dir_entry *ne6x_proc_root; + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct ne6x_soc_temperature temp = {0}; + struct ne6x_soc_power power = {0}; + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char *info = NULL; + ssize_t len = 0; + int err; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + info = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_get_temperature_info(pf, &temp); + if (err) { + dev_err(dev, "get device temperature failed\n"); + } else { + len += sprintf(info, "Chip temperature (°C) %d\n", temp.chip_temerature); + len += sprintf(info + len, "Nic temerature (°C) %d\n", temp.board_temperature); + } + + err = ne6x_dev_get_power_consum(pf, &power); + if (err) { + dev_err(dev, "get device power failed\n"); + } else { + len += sprintf(info + len, "Current (A) %d.%03d\n", + power.cur / 1000, power.cur % 1000); + len += sprintf(info + len, "Voltage (V) %d.%03d\n", + power.vol / 1000, power.vol % 1000); + len += sprintf(info + len, "Power (W) %d.%03d\n", + power.power / 1000, power.power % 1000); + } + + if (!len) { + kfree(info); + return len; + } + + if (copy_to_user(buf, info, len)) { + kfree(info); + return -EFAULT; + } + + *ppos = len; + kfree(info); + return len; +} + +static ssize_t ne6x_proc_i2c_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char info[512] = {0}; + ssize_t len = 0; + u32 id = 0; + int err; + + if (*ppos > 0 || count < 512) + return 0; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_i2c3_signal_test(pf, &id); + if (err) + dev_err(dev, "get device i2c external info failed\n"); + else + len += sprintf(info, "I2c external sig test %d\n", id & 0xff); + + if (!len) + return len; + + if (copy_to_user(buf, info, len)) + return -EFAULT; + + *ppos = len; + return len; +} + +static int ne6x_tps_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static int ne6x_i2c_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static const struct proc_ops ne6x_proc_tps_fops = { + .proc_open = ne6x_tps_open, + .proc_read = ne6x_proc_tps_read, +}; + +static const struct proc_ops ne6x_proc_i2c_fops = { + .proc_open = ne6x_i2c_open, + .proc_read = ne6x_proc_i2c_read, +}; + +void ne6x_proc_pf_init(struct ne6x_pf *pf) +{ + struct proc_dir_entry *pfile = NULL; + const struct device *dev = NULL; + const char *name = NULL; + + name = pci_name(pf->pdev); + dev = &pf->pdev->dev; + pf->ne6x_proc_pf = proc_mkdir(name, ne6x_proc_root); + if (!pf->ne6x_proc_pf) { + dev_err(dev, "proc dir %s create failed\n", name); + return; + } + + pfile = proc_create_data("temperature_power_state", 0600, pf->ne6x_proc_pf, + &ne6x_proc_tps_fops, pf); + if (!pfile) { + dev_err(dev, "proc file temperature_power_state create failed\n"); + goto create_failed; + } + + pfile = proc_create_data("i2c_test", 0600, pf->ne6x_proc_pf, &ne6x_proc_i2c_fops, pf); + if (!pfile) { + dev_err(dev, "proc file i2c_test create failed\n"); + goto create_failed; + } + + return; + +create_failed: + proc_remove(pf->ne6x_proc_pf); +} + +void ne6x_proc_pf_exit(struct ne6x_pf *pf) +{ + proc_remove(pf->ne6x_proc_pf); + pf->ne6x_proc_pf = NULL; +} + +extern char 
ne6x_driver_name[]; +void ne6x_proc_init(void) +{ + ne6x_proc_root = proc_mkdir(ne6x_driver_name, NULL); + if (!ne6x_proc_root) + pr_info("init of proc failed\n"); +} + +void ne6x_proc_exit(void) +{ + proc_remove(ne6x_proc_root); + ne6x_proc_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h new file mode 100644 index 00000000000000..d4ce94cab66b28 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_PROCFS_H +#define _NE6X_PROCFS_H + +struct ne6x_pf; + +void ne6x_proc_pf_init(struct ne6x_pf *pf); +void ne6x_proc_pf_exit(struct ne6x_pf *pf); +void ne6x_proc_init(void); +void ne6x_proc_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c new file mode 100644 index 00000000000000..5d1089d1679765 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c @@ -0,0 +1,1573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6x.h" +#include "ne6x_dev.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" + +#define AXIA_MBUS_READ_MEMORY_COMMAND 0x07 +#define AXIA_MBUS_READ_MEMORY_ACK 0x08 + +#define AXIA_MBUS_WRITE_MEMORY_COMMAND 0x09 +#define AXIA_MBUS_WRITE_MEMORY_ACK 0x0A + +#define AXIA_MBUS_READ_REGISTER_COMMAND 0x0B +#define AXIA_MBUS_READ_REGISTER_ACK 0x0C + +#define AXIA_MBUS_WRITE_REGISTER_COMMAND 0x0D +#define AXIA_MBUS_WRITE_REGISTER_ACK 0x0E + +#define AXIA_MBUS_RESET_FIRMWARE_COMMAND 0x0F +#define AXIA_MBUS_RESET_FIRMWARE_ACK 0x10 +#define AXIA_MBUS_READ_TABLE_COMMAND 0x11 +#define AXIA_MBUS_READ_TABLE_ACK 0x12 + +#define AXIA_MBUS_WRITE_TABLE_COMMAND 0x13 +#define AXIA_MBUS_WRITE_TABLE_ACK 0x14 + +#define AXIA_MBUS_CLEARUP_COMMAND 0x15 +#define AXIA_MBUS_CLEARUP_ACK 0x16 + +/* hash table operator */ +#define AXIA_MBUS_INSERT_COMMAND 0x17 +#define AXIA_MBUS_INSERT_ACK 0x18 + +#define AXIA_MBUS_UPDATE_COMMAND 0x19 +#define AXIA_MBUS_UPDATE_ACK 0x1A + +#define AXIA_MBUS_DELETE_COMMAND 0x1B +#define AXIA_MBUS_DELETE_ACK 0x1C + +#define AXIA_MBUS_LOOKUP_COMMAND 0x1D +#define AXIA_MBUS_LOOKUP_ACK 0x1E + +/* data download operator */ +#define AXIA_MBUS_DOWNLOAD_COMMAND 0x21 +#define AXIA_MBUS_DOWNLOAD_ACK 0x22 + +#define AXIA_MBUS_OPERATOR_COMMAND 0x23 +#define AXIA_MBUS_OPERATOR_ACK 0x24 + +#define AXIA_MBUS_SETUP_PORT_COMMAND 0x25 +#define AXIA_MBUS_SETUP_PORT_ACK 0x26 + +#define AXIA_MBUS_SETUP_TABLE_COMMAND 0x27 +#define AXIA_MBUS_SETUP_TABLE_ACK 0x28 + +#define AXIA_MBUS_SETUP_TAPI_COMMAND 0x29 +#define AXIA_MBUS_SETUP_TAPI_ACK 0x2A + +#define AXIA_MBUS_SETUP_HASH_COMMAND 0x2B +#define AXIA_MBUS_SETUP_HASH_ACK 0x2C + +#define AXIA_MBUS_SETUP_DTAB_COMMAND 0x2D +#define AXIA_MBUS_SETUP_DTAB_ACK 0x2E + +#define AXIA_MBUS_E2PROM_READ_COMMAND 0x2F +#define AXIA_MBUS_E2PROM_READ_ACK 0x30 + +#define AXIA_MBUS_E2PROM_WRITE_COMMAND 0x31 +#define AXIA_MBUS_E2PROM_WRITE_ACK 0x32 + +#define AXIA_MBUS_SET_FAN_SPEED_COMMAND 0x33 +#define AXIA_MBUS_SET_FAN_SPEED_ACK 0x34 + +#define AXIA_MBUS_GET_FAN_SPEED_COMMAND 0x35 +#define AXIA_MBUS_GET_FAN_SPEED_ACK 0x36 + +#define AXIA_MBUS_GET_SYSTEM_INFO_COMMAND 0x37 +#define AXIA_MBUS_GET_SYSTEM_INFO_ACK 0x38 + +#define AXIA_MBUS_UPGRADE_PRE_COMMAND 0x39 +#define AXIA_MBUS_UPGRADE_PRE_COMMAND_ACK 0x3A +#define AXIA_MBUS_UPGRADE_COMMAND 
0x3B +#define AXIA_MBUS_UPGRADE_COMMAND_ACK 0x3C + +#define AXIA_MBUS_GET_VER_COMMAND 0x3D +#define AXIA_MBUS_GET_VER_COMMAND_ACK 0x3E + +#define AXIA_MBUS_TALK_PORT_BASE 0x41 + +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 0) +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 2) +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 0) +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 2) +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 0) +#define AXIA_MBUS_TALK_SET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 1) + +#define AXIA_MBUS_TALK_GET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 2) +#define AXIA_MBUS_TALK_GET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE 
+ 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 0) +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 2) +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 3) + +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 0) +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 1) + +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 2) +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 3) + +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 0) +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 1) + +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 2) +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 3) + +#define AXIA_MBUS_TALK_SET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 0) +#define AXIA_MBUS_TALK_SET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 1) + +#define AXIA_MBUS_TALK_GET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 2) +#define AXIA_MBUS_TALK_GET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 0) +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 2) +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 0) +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 2) +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 0) +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 1) + +#define 
AXIA_MBUS_TALK_GET_PORT_SELF_TEST_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 2) +#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 3) + +#define AXIA_MBUS_SET_NIC_START_COMMAND 0x9F +#define AXIA_MBUS_SET_NIC_START_ACK 0xA0 +#define AXIA_MBUS_SET_NIC_STOP_COMMAND 0xA1 +#define AXIA_MBUS_SET_NIC_STOP_ACK 0xA2 +#define AXIA_MBUS_GET_NIC_STATE_COMMAND 0xA3 +#define AXIA_MBUS_GET_NIC_STATE_ACK 0xA4 +#define AXIA_MBUS_SET_NP_USERDATA_COMMAND 0xA5 +#define AXIA_MBUS_SET_NP_USERDATA_ACK 0xA6 +#define AXIA_MBUS_GET_NP_USERDATA_COMMAND 0xA7 +#define AXIA_MBUS_GET_NP_USERDATA_ACK 0xA8 + +#define AXIA_MBUS_SET_LED_STATE_COMMAND 0xA9 +#define AXIA_MBUS_SET_LED_STATE_ACK 0xAA + +#define AXIA_MBUS_CONFIG_METER_COMMAND 0xAB +#define AXIA_MBUS_CONFIG_METER_ACK 0xAC + +#define AXIA_MBUS_CLEAR_CREDIT_COMMAND 0xAD +#define AXIA_MBUS_CLEAR_CREDIT_ACK 0xAE + +#define AXIA_MBUS_SET_FAST_L2FDB_COMMAND 0xD1 +#define AXIA_MBUS_SET_FAST_L2FDB_ACK 0xD2 + +#define AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND 0xD3 +#define AXIA_MBUS_GET_DUMP_DATA_LEN_ACK 0xD4 + +#define AXIA_MBUS_GET_DUMP_DATA_COMMAND 0xD5 +#define AXIA_MBUS_GET_DUMP_DATA_ACK 0xD6 + +#define AXIA_MBUS_CLR_TABLE_COMMAND 0xD7 +#define AXIA_MBUS_CLR_TABLE_ACK 0xD8 + +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND 0xD9 +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_ACK 0xDA + +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND 0xDB +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_ACK 0xDC + +#define AXIA_MBUS_OPT_NOFLASH_COMMAND 0xDD +#define AXIA_MBUS_OPT_NOFLASH_ACK 0xDE + +#define PCIE2C810_SHM_MBUS_BASE 0x20878000 +#define PCIE2C810_SHM_DATA_BASE 0x20878004 + +#define MEM_ONCHIP_64BIT 0x00 +#define MEM_ONCHIP_512BIT 0x01 +#define MEM_ONXDDR_512BIT 0x04 + +enum engine_idx { + ENGINE_DIRECT_TABLE0 = 0x1, + ENGINE_DIRECT_TABLE1, + ENGINE_HASHA_TABLE, + ENGINE_HASHB_TABLE, +}; + +struct axia_mbus_msg { + union { + u32 uint; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 opcode : 8; + u32 dst_block : 4; + u32 src_block : 4; + u32 data_len : 14; + u32 e : 2; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 e : 2; + u32 
data_len : 14; + u32 src_block : 4; + u32 dst_block : 4; + u32 opcode : 8; +#endif + } bits; + } hdr; + u32 data[]; +} __packed; + +struct ne6x_diag_reg_test_info ne6x_reg_list[] = { + /* offset mask elements stride */ + {NE6X_VP_BASE_ADDR, 0xFFFFFFFFFFFFFFFF, NE6X_VP_INT, 0}, + {0} +}; + +struct ne6x_reg_table_info { + u32 addr; /* engine id as base address */ + u32 size; /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucekt + * 28 - 31: opcode + */ + u32 opcode_read; + u32 opcode_write; +#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_EBABLE 0x01 + u32 advanced_cmd; + u32 opcode_insert; + u32 opcode_delete; + u32 opcode_lookup; + u32 opcode_update; + u32 size_insert; + u32 size_delete; + u32 size_lookup; + u32 size_update; +}; + +static struct ne6x_reg_table_info table_info[] = { + /* address size(tableidx + memtype + bucket + entry_num + size) + * read write adv_cmd insert delete lookup size_insert size_delete size_lookup + */ + {0x00000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0200, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x10000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64}, + + {0x20000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0010, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x30000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0008, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x40000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (4 << 16) | 0x0100, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x50000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_512BIT << 24) | (1 << 21) | (1 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x60000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64}, + + {0x70000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 96, 64, 64, 32}, +}; + +#define TABLE_ADDR(table) (table_info[table].addr & 0xF0000000) +#define TABLE_SIZE(table) (table_info[table].size & 0x00000FFF) +#define TABLE_XMEM(table) (table_info[table].size & 0xFFE00000) +#define TABLE_XNUM(table) ((table_info[table].size >> 16) & 0xF) + +#define TABLE_OPCODE_WRITE(table) (table_info[table].opcode_write & 0x3F) +#define TABLE_OPCODE_READ(table) (table_info[table].opcode_read & 0x3F) +#define TABLE_ADVCMD_VALID(table) (table_info[table].advanced_cmd == 0x01) +#define 
TABLE_OPCODE_INSERT(table) (table_info[table].opcode_insert & 0x3F) +#define TABLE_OPCODE_DELETE(table) (table_info[table].opcode_delete & 0x3F) +#define TABLE_OPCODE_LOOKUP(table) (table_info[table].opcode_lookup & 0x3F) + +#define TABLE_OPCODE_UPDATE(table) (table_info[table].opcode_update & 0x3F) + +#define TABLE_SIZE_INSERT(table) (table_info[table].size_insert) +#define TABLE_SIZE_DELETE(table) (table_info[table].size_delete) +#define TABLE_SIZE_LOOKUP(table) (table_info[table].size_lookup) +#define TABLE_SIZE_UPDATE(table) (table_info[table].size_update) +#define TABLE_SIZE_LOOKUP_RET(table) (table_info[table].size & 0xFFF) + +#define NUM_TABLE(table) (table_info[table].table_num) + +static u64 local_module_base; + +static void ne6x_reg_lock(struct ne6x_pf *pf) +{ + mutex_lock(&pf->mbus_comm_mutex); +} + +static void ne6x_reg_unlock(struct ne6x_pf *pf) +{ + mutex_unlock(&pf->mbus_comm_mutex); +} + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + writeq(reg_value, addr); +} + +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + u64 val = 0; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + val = readq(addr); + + return val; +} + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + ne6x_switch_pci_write(pf->hw.hw_addr4, base_addr, offset_addr, reg_value); +} + +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr) +{ + return ne6x_switch_pci_read(pf->hw.hw_addr4, base_addr, offset_addr); +} + +#define BAR4_CSR_OFFSET 0x3C0 +static u32 ne6x_reg_axi_read(struct ne6x_pf *pf, u32 offset) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30); + + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + + return ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0) & 0xFFFFFFFFUL; +} + +static void ne6x_reg_axi_write(struct ne6x_pf *pf, u32 offset, u32 value) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30) + value; + + reg_offset = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); +} + +static u32 _reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 offset_l = 0x27A00000 | ((offset << 4) & 0xFFFF0); + u32 offset_h; + u32 data = 0; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA1B2C3D4); + } + + data = ne6x_reg_axi_read(pf, offset_l); + + return data; +} + +static void _reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + u32 offset_l; + u32 offset_h; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA2B2C3D4); + } + + offset_l = 0x2FA00000 | ((offset << 4) & 0xFFFF0); + ne6x_reg_axi_write(pf, offset_l, value); +} + +u32 NE6X_ACCESS_TIMEOUT = 9999; +static int _ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + struct axia_mbus_msg resp; 
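+ /*
+  * Mailbox handshake: the request words are written into shared memory at
+  * PCIE2C810_SHM_MBUS_BASE, register 0x20680014 is poked to notify the C810
+  * firmware, and the header is then polled until the opcode turns even
+  * (requests are odd, acks are even).  -ETIMEDOUT is returned after
+  * NE6X_ACCESS_TIMEOUT polls, -EAGAIN when the ack carries the error flag,
+  * and on success retlen words are read back from PCIE2C810_SHM_DATA_BASE
+  * into pbuf.
+  */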
+ int timeout = 0, index = 0; + + memset(&resp, 0, sizeof(resp)); + + /* Write Command(s) */ + for (index = 0; index < len; index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, data[index]); + + /* Start mbus mechanism, notice c810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + + usleep_range(200, 300); + + /* check if c810 handle completed */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + + /* resp opcode is even number, request opcode is odd number */ + if ((resp.hdr.bits.opcode & 0x01) == 0x0) + break; + + timeout++; + usleep_range(200, 220); + } + + if (timeout >= NE6X_ACCESS_TIMEOUT) { + dev_info(ne6x_pf_to_dev(pf), "%s: timeout! (%d)\n", __func__, timeout); + return -ETIMEDOUT; + } + + if (resp.hdr.bits.e == 1) { + dev_info(ne6x_pf_to_dev(pf), "%s: response.bits.e = 1 !\n", __func__); + return -EAGAIN; + } + + if (!pbuf) + return 0; + + for (index = 0; index < retlen; index++) + pbuf[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index); + + return 0; +} + +static int ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + int status; + + ne6x_reg_lock(pf); + status = _ne6x_reg_perform(pf, data, pbuf, len, retlen); + ne6x_reg_unlock(pf); + + return status; +} + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 data; + + ne6x_reg_lock(pf); + data = _reg_apb_read(pf, offset); + ne6x_reg_unlock(pf); + + return data; +} + +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + ne6x_reg_lock(pf); + _reg_apb_write(pf, offset, value); + ne6x_reg_unlock(pf); +} + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_REGISTER_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = addr; + + status = ne6x_reg_perform(pf, (u32 *)msg, value, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_WRITE_REGISTER_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = value; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +static bool ne6x_reg_valid_table(struct ne6x_pf *pf, enum ne6x_reg_table table) +{ + if (pf->hw_flag != 0) { + if (table > NE6X_REG_ARFS_TABLE) + return false; + } else { + if (table > NE6X_REG_VF_BW_TABLE) + return false; + } + + return true; +} + +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size % TABLE_SIZE(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_READ(table)); + msg->hdr.bits.data_len = 12; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)data, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg *msg; + int status; 
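+ /*
+  * Direct indexed writes are only supported for tables that do not use the
+  * advanced hash commands; hash-managed tables are modified through the
+  * insert/update/delete/lookup helpers instead.
+  */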
+ + if (TABLE_ADVCMD_VALID(table)) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_WRITE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + size / 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id) +{ + struct axia_mbus_msg *msg; + int status, count; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_INSERT(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + count = size / TABLE_SIZE_INSERT(table); + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_INSERT(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_INSERT(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, table_id, 3 + (size >> 2), + (!table_id) ? 0 : count); + kfree(msg); + + return status; +} + +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (TABLE_SIZE_DELETE(table) != size) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_DELETE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_LOOKUP(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1036, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_LOOKUP(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_LOOKUP_RET(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, ret_data, 3 + (size >> 2), ret_size / 4); + kfree(msg); + + return (status != 0) ? 
-ENOENT : status; +} + +static int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 index, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_UPDATE(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1036, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_UPDATE(table)); + msg->hdr.bits.data_len = 16 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = index; + msg->data[2] = TABLE_SIZE_UPDATE(table); + memcpy((void *)&msg->data[3], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (size >> 2), 0); + kfree(msg); + + return (status != 0) ? -ENOENT : status; +} + +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, + int port, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (((size % 4) != 0) || size > 512) + return -EINVAL; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (AXIA_MBUS_TALK_PORT_BASE + 4 * talk + 2 * opcode); + msg->hdr.bits.data_len = 8 + size; + msg->data[0] = port; + if (pbuf) + memcpy(&msg->data[1], pbuf, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, (opcode == NE6X_TALK_GET) ? pbuf : NULL, + 2 + ((opcode == NE6X_TALK_GET) ? 0 : (size >> 2)), + (opcode == NE6X_TALK_GET) ? (size >> 2) : 0); + kfree(msg); + + return status; +} + +int ne6x_reg_reset_firmware(struct ne6x_pf *pf) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_RESET_FIRMWARE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 1, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_READ_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = offset; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_WRITE_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = (offset); + msg->data[1] = (size); + memcpy((void *)&msg->data[1], (void *)pbuf, (ssize_t)size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)speed, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed) +{ + struct axia_mbus_msg *msg; + int status; + + msg = 
kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = speed; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = class_type; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)ret, 3, size >> 2); + kfree(msg); + + return status; +} + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = 4; + msg->data[1] = port; + msg->data[2] = mode; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +#define NE6X_FW_MAX_FRG_SIZE (4 * 1024) +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size) +{ + struct axia_mbus_msg *msg; + int offset = 0, left_size = 0, frag_size = 0; + int status = 0; + + msg = kzalloc(NE6X_FW_MAX_FRG_SIZE + 16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + ne6x_reg_lock(pf); + /* scile begin */ + NE6X_ACCESS_TIMEOUT = 100000; + left_size = size; + while (left_size) { + frag_size = (left_size >= NE6X_FW_MAX_FRG_SIZE) ? NE6X_FW_MAX_FRG_SIZE : left_size; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_UPGRADE_COMMAND; + msg->hdr.bits.data_len = 12 + frag_size; + msg->data[0] = region; /* region */ + msg->data[1] = frag_size; /* size */ + memcpy(&msg->data[2], data + offset, frag_size); + + status |= _ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (frag_size >> 2), 0); + if (status) + goto err_upgrade; + + left_size -= frag_size; + offset += frag_size; + } + +err_upgrade: + /* scile end */ + NE6X_ACCESS_TIMEOUT = 999; + ne6x_reg_unlock(pf); + kfree(msg); + + return status; +} + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version) +{ + struct axia_mbus_msg *msg; + u32 *out_buffer = (u32 *)version; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_VER_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, out_buffer, 1, + sizeof(struct ne6x_firmware_ver_info) / sizeof(u32)); + kfree(msg); + + return status; +} + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, u32 offset, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = port; + msg->data[1] = offset; + msg->data[2] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 4, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_START_COMMAND; + 
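+ /* data_len appears to cover the 4-byte mbus header plus the payload words (one flag word here) */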
msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_STOP_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NIC_STATE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)state, 1, 1); + kfree(msg); + + return status; +} + +static int ne6x_reg_set_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NP_USERDATA_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = type; + msg->data[1] = data; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +static int ne6x_reg_get_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NP_USERDATA_COMMAND; + msg->hdr.bits.data_len = 4; + msg->data[0] = type; + + status = ne6x_reg_perform(pf, (u32 *)msg, data, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + return ne6x_reg_set_user_data_template(pf, type, data); +} + +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + int status = 0; + + status = ne6x_reg_get_user_data_template(pf, type, data); + + return status; +} + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_LED_STATE_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = port; + msg->data[1] = state; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CONFIG_METER_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = meter_id; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, u32 *data, + u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAST_L2FDB_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = index; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, 
NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, size, 1, 1); + kfree(msg); + + return status; +} + +static void ne6x_reg_send(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 size) +{ + struct axia_mbus_msg *msg; + u32 *msg_data; + int index; + + msg = kzalloc(size + 12, GFP_KERNEL); + if (!msg) + return; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = cmd; + msg->hdr.bits.data_len = 4 + size; + memcpy((void *)&msg->data[0], (void *)data, size); + + msg_data = (u32 *)msg; + /* Write Command(s) */ + for (index = 0; index < ((size / 4) + 1); index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, msg_data[index]); + + /* Start mbus mechanism, notice c810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + usleep_range(1000, 1200); + kfree(msg); +} + +static int ne6x_reg_polling(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 buf_size, + u32 *real_size) +{ + int timeout = 0, offset = 0; + struct axia_mbus_msg resp; + int index, status; + + memset(&resp, 0, sizeof(resp)); + + /* check if c810 handle completed */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + if (resp.hdr.bits.opcode == cmd) + break; + + timeout++; + usleep_range(200, 220); + } + + status = (timeout >= NE6X_ACCESS_TIMEOUT) ? -ETIMEDOUT : 0; + status = (resp.hdr.bits.e == 1) ? -EAGAIN : status; + if (status) { + dev_info(ne6x_pf_to_dev(pf), "%s: cmd %d status (%d)\n", __func__, cmd, status); + return status; + } + + switch (cmd) { + case AXIA_MBUS_GET_DUMP_DATA_ACK: + *real_size = resp.hdr.bits.data_len - sizeof(resp) - sizeof(u32); + offset = sizeof(u32); + pf->dump_info = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE); + break; + default: + *real_size = resp.hdr.bits.data_len - sizeof(resp); + offset = 0; + break; + } + + if (*real_size > buf_size) + *real_size = buf_size; + + for (index = 0; index < (*real_size) / 4; index++) + data[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index + offset); + + return 0; +} + +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + u32 *temp_buff = data; + u32 left_size = size; + u32 real_size = 0; + + memset(&pf->dump_info, 0, sizeof(u32)); + + ne6x_reg_lock(pf); + while (left_size > 0) { + temp_buff += real_size / 4; + ne6x_reg_send(pf, AXIA_MBUS_GET_DUMP_DATA_COMMAND, (u32 *)&pf->dump_info, 4); + if (ne6x_reg_polling(pf, AXIA_MBUS_GET_DUMP_DATA_ACK, + temp_buff, left_size, &real_size)) { + ne6x_reg_unlock(pf); + return -EAGAIN; + } + + left_size -= real_size; + } + ne6x_reg_unlock(pf); + + return 0; +} + +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id) +{ + struct axia_mbus_msg *msg; + int status; + + if (!ne6x_reg_valid_table(pf, table_id)) + return -EINVAL; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + NE6X_ACCESS_TIMEOUT = 99999; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CLR_TABLE_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = table_id; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + NE6X_ACCESS_TIMEOUT = 9999; + + return status; +} + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = 
kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = write_protect; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, p_write_protect, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16 + length; + msg->data[0] = NE6X_NORFLASH_OP_WRITE_E; + msg->data[1] = offset; + msg->data[2] = length; + memcpy((void *)&msg->data[3], (void *)pdata, length); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (length >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_ERASE_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_READ_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, p, 4, length >> 2); + kfree(msg); + + return status; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h new file mode 100644 index 00000000000000..cf8a7c5767a122 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_REG_H +#define _NE6X_REG_H + +#include + +struct ne6x_diag_reg_test_info { + u32 offset; /* the base register */ + u64 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +enum ne6x_reg_table { + NE6X_REG_RSS_TABLE = 0x0, + NE6X_REG_L2FDB_TABLE, + NE6X_REG_VLAN_TABLE, + NE6X_REG_MAC_LEARN_TABLE, + NE6X_REG_VF_STAT_TABLE, + NE6X_REG_VF_BW_TABLE, + NE6X_REG_ACL_TABLE, + NE6X_REG_ARFS_TABLE, + NE6X_REG_TABLE_LAST, +}; + +enum ne6x_reg_talk_port { + NE6X_MSG_PORT_ENABLE = 0, + NE6X_MSG_PORT_DUPLEX, + NE6X_MSG_PORT_SPEED, + NE6X_MSG_PORT_STATS, + NE6X_MSG_PORT_SFP_SPEED, + NE6X_MSG_PORT_FEC, + NE6X_MSG_PORT_SPEED_MAX, + NE6X_MSG_PORT_PAUSE, + NE6X_MSG_PORT_PAUSE_ADDR, + NE6X_MSG_PORT_LOOPBACK, + NE6X_MSG_PORT_MAX_FRAME, + NE6X_MSG_PORT_AUTO_NEG, + NE6X_MSG_PORT_INFO, + NE6X_MSG_PORT_LINK_STATUS, + NE6X_MSG_PORT_DRV_I2C, + NE6X_MSG_PORT_SELF_TEST, + NE6X_MSG_PORT_SFP_TYPE_LEN, + NE6X_MSG_PORT_SFP_EEPROM, + NE6X_MSG_PORT_STATE, +}; + +enum ne6x_reg_talk_opcode { + NE6X_TALK_SET = 0, + NE6X_TALK_GET +}; + +extern struct ne6x_diag_reg_test_info ne6x_reg_list[]; + +struct table_info { + u32 addr; /* 00 - 27: max_size + * 28 - 31: engine_idx + */ + u32 size; + /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucekt + * 28 - 31: opcode + */ + u16 opcode_read; + u16 opcode_write; +#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_EBABLE 0x01 + u32 advanced_cmd; + u16 opcode_insert; + u16 opcode_delete; + u16 opcode_update; + u16 opcode_search; + u16 size_insert; + u16 size_delete; + u16 size_search; + u16 size_update; +}; + +struct rss_table { + u32 resv; + u32 flag; + u32 hash_fun; /* 24-31, func, 23-1,type */ + u32 queue_base; + u16 queue_def; + u16 queue_size; + u16 entry_num; + u16 entry_size; + u8 entry_data[128]; + u8 hash_key[352]; + u8 resv1[8]; +}; + +struct l2fdb_dest_unicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 rsv[3]; + u32 vp_bmp[3]; + u32 cnt; /* leaf num */ + u8 resv3[44]; +}; + +struct l2fdb_dest_multicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 resv3[3]; + u32 vp_bmp[3]; + u8 resv4[48]; +}; + +struct l2fdb_search_result { + u32 key_index; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; +}; + +struct l2fdb_table { + u8 resv1; + u8 pport; + u8 mac[6]; + u32 vlanid; + u8 resv2[52]; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; /* forward info */ +}; + +struct l2fdb_fast_table { + u8 mac[6]; + u8 start_cos; + u8 cos_num; +}; + +struct meter_table { + u32 cir; + u32 cbs; + u32 pir; + u32 pbs; +}; + +enum np_user_data { + NP_USER_DATA_HW_FEATURES = 0, + NP_USER_DATA_HW_FLAGS = 1, + NP_USER_DATA_RSS_TABLE_SIZE = 2, + NP_USER_DATA_RSS_TABLE_ENTRY_WIDTH = 3, + NP_USER_DATA_RSS_HASH_KEY_BLOCK_SIZE = 4, + NP_USER_DATA_PORT2PI_0 = 5, + NP_USER_DATA_PI2PORT_0 = 25, + NP_USER_DATA_VLAN_TYPE = 33, + NP_USER_DATA_RSV_0 = 34, + NP_USER_DATA_RSV_1 = 35, + NP_USER_DATA_RSV_2 = 36, + NP_USER_DATA_PI0_BROADCAST_LEAF = 37, + NP_USER_DATA_PORT_OLFLAGS_0 = 53, + NP_USER_DATA_PORT_2_COS_0 = 121, + NP_USER_DATA_VPORT0_LINK_STATUS = 155, + NP_USER_DATA_TSO_CKSUM_DISABLE = 156, + NP_USER_DATA_PORT0_MTU = 157, + NP_USER_DATA_PORT0_QINQ = 161, + NP_USER_DATA_CQ_SIZE = 229, + NP_USER_DATA_FAST_MODE = 230, + NP_USER_DATA_SUB_FLAG = 231, + NP_USER_DATA_DDOS_FLAG = 242, + NP_USER_DATA_END = 255, +}; + +struct 
ne6x_diag_reg_info { + u32 address; + u32 value; +}; + +enum { + NE6X_NORFLASH_OP_WRITE_E = 0, + NE6X_NORFLASH_OP_READ_E = 1, + NE6X_NORFLASH_OP_ERASE_E = 2, + NE6X_NORFLASH_OP_E_END, +}; + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, + u32 offset_addr, u64 reg_value); +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr); + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); +int ne6x_reg_reset_firmware(struct ne6x_pf *pf); +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value); +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value); +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id); +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size); +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size); + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed); +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed); + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size); +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, int port, + void *pbuf, int size); +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size); + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version); + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, + u32 offset, int size); + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag); + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state); + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data); +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data); + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state); +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size); + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode); + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, + u32 *data, u32 size); +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id); + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata); +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length); +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c new file mode 100644 index 
00000000000000..bb70698eefecd4 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_txrx.h" +#include "ne6x_reg.h" + +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) { + err = ne6x_setup_tx_descriptors(adpt->tx_rings[i]); + err = ne6x_setup_tg_descriptors(adpt->tg_rings[i]); + err = ne6x_setup_cq_descriptors(adpt->cq_rings[i]); + err = ne6x_setup_tx_sgl(adpt->tx_rings[i]); + } + + return err; +} + +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_setup_rx_descriptors(adpt->rx_rings[i]); + + return err; +} + +static inline void ne6x_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + u64 val = 1ULL << NE6X_VP_CQ_INTSHIFT; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) { + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT), val); + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), ~(val)); + } else { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT), val); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), ~(val)); + } + } +} + +int ne6x_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + ring = q_vector->cq.ring; + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. 
+ */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings && adpt->tx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + kfree_rcu(adpt->tx_rings[i], rcu); + adpt->tx_rings[i] = NULL; + adpt->rx_rings[i] = NULL; + adpt->cq_rings[i] = NULL; + } + } +} + +int ne6x_alloc_rings(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *ring; + int i, qpv = 4; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < adpt->num_queue; i++) { + /* allocate space for both Tx and Rx in one shot */ + ring = kcalloc(qpv, sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tx_desc; + ring->size = 0; + adpt->tx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_cq_desc; + ring->size = 0; + adpt->cq_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_rx_desc; + ring->size = 0; + adpt->rx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tg_desc; + ring->size = 0; + adpt->tg_rings[i] = ring; + } + + return 0; + +err_out: + ne6x_adpt_clear_rings(adpt); + return -ENOMEM; +} + +static int ne6x_configure_tx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_sq_base_addr sq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_sq_cfg sq_cfg; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (pf_q < NE6X_PF_VP0_NUM) { + sq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG), sq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV mode PF Config */ + sq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR), + sq_base_addr.val); + + sq_cfg.val = + 
rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_CFG), sq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_tx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + for (i = 0; (i < adpt->num_queue) && !err; i++) + err = ne6x_configure_tx_ring(adpt->tx_rings[i]); + + return err; +} + +static int ne6x_configure_cq_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_cq_base_addr cq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_cq_cfg cq_cfg; + + /* SRIOV enabled VF config OR SRIOV disabled PF config */ + if (pf_q < NE6X_PF_VP0_NUM) { + cq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG), cq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr0 + + (NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } else { + /* SRIOV enable PF config */ + cq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_BASE_ADDR), + cq_base_addr.val); + + cq_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG), + cq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr4 + + (NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } + + return 0; +} + +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_cq_ring(adpt->cq_rings[i]); + + return 0; +} + +static int ne6x_configure_rx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_rq_block_cfg rq_block_cfg; + union ne6x_rq_base_addr rq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_rq_cfg rc_cfg; + u16 rxmax = 0; + + ring->rx_buf_len = adpt->rx_buf_len; + + if (pf_q < NE6X_PF_VP0_NUM) { + rq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR)); + 
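+ /*
+ * Read-modify-write of the 64-bit RQ CSRs through their register
+ * unions: only the fields set below (base address, block sizes,
+ * queue length and the RQ control flags) are rewritten; any other
+ * bits read back from the hardware are preserved.
+ */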
rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG), rc_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV enabled PF Config */ + rq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_BASE_ADDR), + rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG), + rq_block_cfg.val); + + rc_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_CFG), rc_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + adpt->max_frame = NE6X_MAX_RXBUFFER; + adpt->rx_buf_len = (PAGE_SIZE < 8192) ? 
NE6X_RXBUFFER_4096 : NE6X_RXBUFFER_4096; + + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_rx_ring(adpt->rx_rings[i]); + + return err; +} + +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_ring *tx_ring = adpt->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = adpt->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen = 4; + int nsg; + bool jumbo_frame = true; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + /* single packet add 4 byte to CRC */ + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(adpt->netdev, "TX: skb_cow_data() returned %d\n", nsg); + return nsg; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(adpt->netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h new file mode 100644 index 00000000000000..b09563cfc4e35f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_TXRX_H +#define _NE6X_TXRX_H + +int ne6x_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c new file mode 100644 index 00000000000000..c7c8910c6065ba --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c @@ -0,0 +1,2392 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_interrupt.h" + +#ifdef CONFIG_PCI_IOV + +static void ne6x_clear_vf_status(struct ne6x_vf *vf) +{ + struct ne6x_flowctrl flowctrl; + + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + ne6x_dev_set_flowctrl(vf->adpt, &flowctrl); + ne6x_dev_set_vf_bw(vf->adpt, 0); +} + +static void ne6x_mbx_deinit_snapshot(struct ne6x_hw *hw) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset vf counter length */ + kfree(snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; +} + +static int ne6x_mbx_init_snapshot(struct ne6x_hw *hw, u16 vf_count) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF. + */ + if (!vf_count || vf_count > NE6X_MAX_VP_NUM) + return 1; + + snap->mbx_vf.vf_cntr = kcalloc(vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return 1; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. 
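+ * The snapshot then starts in the NEW_SNAPSHOT detect state and every
+ * mailbox "ready to send" flag is primed to true so the PF can
+ * transmit to each VF immediately.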
+ */ + snap->mbx_vf.vfcntr_len = vf_count; + snap->state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + memset(hw->ne6x_mbx_ready_to_send, true, 64); + + return 0; +} + +static int ne6x_status_to_errno(int err) +{ + if (err) + return -EINVAL; + + return 0; +} + +static void ne6x_set_vf_state_qs_dis(struct ne6x_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + clear_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states); +} + +static void ne6x_dis_vf_qs(struct ne6x_vf *vf) +{ + ne6x_set_vf_state_qs_dis(vf); +} + +static bool ne6x_is_reset_in_progress(unsigned long *state) +{ + return test_bit(NE6X_PF_RESET_REQUESTED, state) || + test_bit(NE6X_RESET_INTR_RECEIVED, state) || + test_bit(NE6X_CORE_RESET_REQUESTED, state) || + test_bit(NE6X_GLOBAL_RESET_REQUESTED, state); +} + +static void ne6x_adpt_close_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); +} + +static int ne6x_adpt_clear_vf(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + + kfree(adpt->q_vectors); + adpt->q_vectors = NULL; + + kfree(adpt->port_info); + adpt->port_info = NULL; + + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +static int ne6x_adpt_release_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + struct ne6x_pf *pf; + + if (!adpt->back) + return -ENODEV; + + pf = adpt->back; + + if (adpt->netdev && !ne6x_is_reset_in_progress(pf->state) && + (test_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state))) { + unregister_netdev(adpt->netdev); + clear_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state); + } + + ne6x_adpt_close_vf(adpt, vf_id); + + if (!ne6x_is_reset_in_progress(pf->state)) + ne6x_adpt_clear_vf(adpt); + + return 0; +} + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf) +{ + return vf->pf->adpt[vf->lan_adpt_idx]; +} + +static void ne6x_vf_invalidate_adpt(struct 
ne6x_vf *vf) +{ + vf->lan_adpt_idx = NE6X_NO_ADPT; +} + +static void ne6x_vf_adpt_release(struct ne6x_vf *vf) +{ + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + ne6x_adpt_release_vf(ne6x_get_vf_adpt(vf), vf->vf_id); + ne6x_vf_invalidate_adpt(vf); +} + +static void ne6x_free_vf_res(struct ne6x_vf *vf) +{ + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's adapter after it's freed or invalidated. + */ + clear_bit(NE6X_VF_STATE_INIT, vf->vf_states); + + /* free adapter and disconnect it from the parent uplink */ + if (vf->lan_adpt_idx != NE6X_NO_ADPT) { + if (vf->tx_rate) { + ne6x_dev_set_vf_bw(ne6x_get_vf_adpt(vf), 0); + vf->tx_rate = 0; + } + + ne6x_vf_adpt_release(vf); + } +} + +static int ne6x_sriov_free_msix_res(struct ne6x_pf *pf) +{ + struct ne6x_lump_tracking *res; + + if (!pf) + return -EINVAL; + + res = pf->irq_pile; + if (!res) + return -EINVAL; + + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +static void ne6x_free_vfs(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + unsigned int tmp, i; + u64 reg; + + if (!pf->vf) + return; + + while (test_and_set_bit(NE6X_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. 
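+ * SR-IOV is only disabled here when no VFs are still assigned to
+ * guests; otherwise a warning is logged and the per-VF resources are
+ * still reclaimed below.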
+ */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); + + /* Avoid wait time by stopping all VFs at the same time */ + ne6x_for_each_vf(pf, i) { + if (test_bit(NE6X_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ne6x_dis_vf_qs(&pf->vf[i]); + } + + tmp = pf->num_alloc_vfs; + pf->num_qps_per_vf = 0; + pf->num_alloc_vfs = 0; + + for (i = 0; i < tmp; i++) { + if (test_bit(NE6X_VF_STATE_INIT, pf->vf[i].vf_states)) { + set_bit(NE6X_VF_STATE_DIS, pf->vf[i].vf_states); + ne6x_free_vf_res(&pf->vf[i]); + } + } + + if (ne6x_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + ne6x_dev_clear_vport(pf); + kfree(pf->vf); + pf->vf = NULL; + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x8000; + wr64_bar4(&pf->hw, 0x05300, reg); + + clear_bit(NE6X_VF_DIS, pf->state); +} + +static int ne6x_alloc_vfs(struct ne6x_pf *pf, int num_vfs) +{ + struct ne6x_vf *vfs; + + vfs = kcalloc(num_vfs, sizeof(*vfs), GFP_KERNEL); + if (!vfs) + return -ENOMEM; + + pf->vf = vfs; + pf->num_alloc_vfs = num_vfs; + + return 0; +} + +static int ne6x_sriov_set_msix_res(struct ne6x_pf *pf, u16 num_msix_needed) +{ + int sriov_base_vector; + + sriov_base_vector = NE6X_MAX_MSIX_NUM - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector < 0) + return -EINVAL; + + return 0; +} + +static int ne6x_set_per_vf_res(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + u16 queue; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + queue = NE6X_MAX_VP_NUM / pf->num_alloc_vfs; + + if (ne6x_sriov_set_msix_res(pf, queue * pf->num_alloc_vfs)) { + dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", pf->num_alloc_vfs); + return -EINVAL; + } + + /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ + pf->num_qps_per_vf = queue; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", pf->num_alloc_vfs, + pf->num_qps_per_vf, pf->num_qps_per_vf); + + return 0; +} + +static void ne6x_vc_clear_allowlist(struct ne6x_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX); +} + +/* default opcodes to communicate with VF */ +static const u32 default_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_VERSION, + VIRTCHNL_OP_RESET_VF, +}; + +static void ne6x_vc_allowlist_opcodes(struct ne6x_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +static void ne6x_vc_set_default_allowlist(struct ne6x_vf *vf) +{ + ne6x_vc_clear_allowlist(vf); + ne6x_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + ARRAY_SIZE(default_allowlist_opcodes)); +} + +static void ne6x_set_dflt_settings_vfs(struct ne6x_pf *pf) +{ + int i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + vf->pf = pf; + vf->vf_id = i; + vf->base_queue = (NE6X_MAX_VP_NUM / pf->num_alloc_vfs) * i; + vf->num_vf_qs = pf->num_qps_per_vf; + vf->tx_rate = 0; + test_and_clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vc_set_default_allowlist(vf); + } +} + +static void ne6x_send_init_mbx_mesg(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + u64 reg_cfg; + int i; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), 0x0); + reg_cfg = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK)); + reg_cfg &= ~(1ULL << vf->base_queue); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), reg_cfg); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), reg_cfg); + } +} + +static struct ne6x_port_info *ne6x_vf_get_port_info(struct ne6x_vf *vf) +{ + struct ne6x_adapter *adpt = ne6x_get_vf_adpt(vf); + + return adpt->port_info; +} + +static struct ne6x_adapter *ne6x_adpt_alloc(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt = NULL; + int pf_adpt_idx; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* If we have already allocated our maximum number of adapters, + * pf->next_adpt will be NE6X_NO_ADPT. 
If not, pf->next_adpt index + * is available to be populated + */ + if (pf->next_adpt == NE6X_NO_ADPT) { + dev_dbg(dev, "out of adapter slots!\n"); + goto unlock_pf; + } + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + adpt->type = NE6X_ADPT_VF; + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + adpt->num_queue = pf->vf[vf_id].num_vf_qs; + adpt->num_q_vectors = pf->vf[vf_id].num_vf_qs; + /* vf_id 0 -- 63: vport: 0 -- 64: pf: 64 -- 68 */ + adpt->idx = pf->vf[vf_id].vf_id + pf->num_alloc_adpt; + adpt->vport = pf->vf[vf_id].vf_id; + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) + goto err_rings; + + /* vf attach pf alloc */ + pf_adpt_idx = pf->vf[vf_id].base_queue / (NE6X_MAX_VP_NUM / pf->hw.pf_port); + adpt->port_info->lport = pf->adpt[pf_adpt_idx]->port_info->lport; + adpt->port_info->hw_port_id = pf->adpt[pf_adpt_idx]->port_info->hw_port_id; + adpt->port_info->hw = &pf->hw; + adpt->port_info->hw_trunk_id = pf->adpt[pf_adpt_idx]->port_info->hw_trunk_id; + adpt->port_info->hw_queue_base = pf->vf[vf_id].base_queue; + adpt->port_info->hw_max_queue = pf->vf[vf_id].num_vf_qs; + adpt->base_queue = pf->vf[vf_id].base_queue; + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + pf->adpt[adpt->idx] = adpt; + + goto unlock_pf; + +err_rings: + kfree(adpt); + adpt = NULL; +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return adpt; +} + +static struct ne6x_adapter *ne6x_adpt_setup_vf(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_alloc(pf, vf_id, num_vfs); + if (!adpt) { + dev_err(dev, "could not allocate adapter\n"); + return NULL; + } + + return adpt; +} + +static struct ne6x_adapter *ne6x_vf_adpt_setup(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_setup_vf(pf, vf->vf_id, num_vfs); + if (!adpt) { + dev_err(ne6x_pf_to_dev(pf), "Failed to create VF adapter\n"); + ne6x_vf_invalidate_adpt(vf); + return NULL; + } + + vf->lan_adpt_idx = adpt->idx; + vf->adpt = adpt; + + return adpt; +} + +static int ne6x_init_vf_adpt_res(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + struct ne6x_adapter *adpt; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt = ne6x_vf_adpt_setup(vf, num_vfs); + if (!adpt) + return -ENOMEM; + + vf->tx_rate = 0; + ne6x_dev_set_vf_bw(adpt, vf->tx_rate); + eth_broadcast_addr(broadcast); + + return 0; +} + +static int ne6x_start_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + int retval, i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + retval = ne6x_init_vf_adpt_res(vf, num_vfs); + if (retval) { + dev_err(ne6x_pf_to_dev(pf), "Failed to initialize adapter resources for VF %d, error %d\n", + vf->vf_id, retval); + goto teardown; + } + + set_bit(NE6X_VF_STATE_INIT, vf->vf_states); + } + + ne6x_linkscan_schedule(pf); + + return 0; + +teardown: + for (i = i - 1; i >= 0; i--) { + struct ne6x_vf *vf = &pf->vf[i]; + + ne6x_vf_adpt_release(vf); + } + + return retval; +} + +static int ne6x_delete_pf_trunk(struct ne6x_pf *pf) +{ + return 0; +} + +static int 
ne6x_recycle_vp_resources(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt; + int rst, i; + u64 reg; + + rst = ne6x_delete_pf_trunk(pf); + if (rst) + return rst; + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_close(adpt); + } + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x7c000; + wr64_bar4(&pf->hw, 0x05300, reg); + + return 0; +} + +static int ne6x_adpt_resetup(struct ne6x_pf *pf, bool recovery) +{ + int vid, pooling, i, actual_vector = 1, size; + struct device *dev = ne6x_pf_to_dev(pf); + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + int qp_remaining, q_vectors; + struct ne6x_adapter *adpt = NULL; + u64 __iomem *reg; + + pooling = test_bit(NE6X_LINK_POOLING, pf->state); + if (pooling) + clear_bit(NE6X_LINK_POOLING, pf->state); + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + pci_disable_msix(pf->pdev); + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, + NE6X_MAX_MSIX_NUM); + if (actual_vector < NE6X_MAX_MSIX_NUM) { + clear_bit(NE6X_PF_MSIX, pf->state); + pci_disable_msix(pf->pdev); + dev_err(dev, "%s-%d: error msix enable failed\n", __func__, __LINE__); + } + + pf->irq_pile->num_entries = actual_vector; + } else { + if (!pf->irq_pile) { + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + + pf->irq_pile->num_entries = actual_vector; + } + + test_and_set_bit(NE6X_PF_INTX, pf->state); + } + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + ne6x_for_each_pf(pf, vid) { + adpt = pf->adpt[vid]; + if (recovery) { + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = pf->hw.max_queue; + adpt->port_info->queue = adpt->port_info->hw_max_queue; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } else { + adpt->port_info->hw_queue_base_old = adpt->port_info->hw_queue_base; + 
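+ /*
+ * Non-recovery path (SR-IOV being enabled): remember the current
+ * queue base and shrink this PF port to a single queue/vector at
+ * NE6X_PF_VP1_NUM + vid, presumably so the remaining hardware
+ * queues can be handed out to the VFs.
+ */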
adpt->port_info->hw_queue_base = NE6X_PF_VP1_NUM + vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = 1u; + adpt->port_info->queue = 1u; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } + + for (i = 0; i < adpt->num_queue; i++) { + adpt->rx_rings[i]->reg_idx = adpt->base_queue + i; + adpt->cq_rings[i]->reg_idx = adpt->rx_rings[i]->reg_idx; + adpt->tx_rings[i]->reg_idx = adpt->cq_rings[i]->reg_idx; + } + + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_vport(adpt); + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + ne6x_dev_set_port2pi(adpt); + rtnl_lock(); + + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_open(adpt); + + rtnl_unlock(); + } + + ne6x_init_link_irq(pf); + ne6x_enable_link_irq(pf); + + if (pooling) { + set_bit(NE6X_LINK_POOLING, pf->state); + ne6x_linkscan_schedule(pf); + } + + return 0; +} + +static int ne6x_ena_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + int ret; + + ret = ne6x_recycle_vp_resources(pf); + if (ret) + goto err_pci_disable_sriov; + + ret = ne6x_adpt_resetup(pf, false); + if (ret) + goto err_pci_disable_sriov; + + ne6x_clr_vf_bw_for_max_vpnum(pf); + ret = ne6x_alloc_vfs(pf, num_vfs); + if (ret) + goto err_pci_disable_sriov; + + if (ne6x_set_per_vf_res(pf)) { + dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", + num_vfs); + ret = -ENOSPC; + goto err_unroll_sriov; + } + + ne6x_set_dflt_settings_vfs(pf); + if (ne6x_start_vfs(pf, num_vfs)) { + dev_err(dev, "Failed to start VF(s)\n"); + ret = -EAGAIN; + goto err_unroll_sriov; + } + + ne6x_init_mailbox_irq(pf); + ne6x_send_init_mbx_mesg(pf); + clear_bit(NE6X_VF_DIS, pf->state); + + return 0; + +err_unroll_sriov: + kfree(pf->vf); + pf->vf = NULL; + pf->num_alloc_vfs = 0; +err_pci_disable_sriov: + pci_disable_sriov(pf->pdev); + + return ret; +} + +static int ne6x_pci_sriov_ena(struct ne6x_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = ne6x_pf_to_dev(pf); + int err; + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ne6x_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return 0; + + if (num_vfs > NE6X_MAX_VP_NUM) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", num_vfs, + NE6X_MAX_VP_NUM); + return -EOPNOTSUPP; + } + + err = ne6x_ena_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + if (num_vfs) + test_and_set_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + return 0; +} + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_adapter *adpt = NULL; + struct ne6x_vf *vf = NULL; + pbmp_t port_bitmap; + int err = 0, vf_id; + int timeout = 50; + int status; + + if (!(num_vfs == 0 || num_vfs == 2 || num_vfs == 4 || num_vfs == 8 || + num_vfs == 16 || num_vfs == 32 || num_vfs == 64)) + return -EINVAL; + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + 
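+ /*
+ * Enabling SR-IOV needs the full NE6X_MAX_MSIX_NUM vector range;
+ * refuse when fewer MSI-X entries were allocated.
+ */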
dev_err(ne6x_pf_to_dev(pf), "ne6x irq number < %d!\n", NE6X_MAX_MSIX_NUM); + return -EPERM; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + dev_warn(ne6x_pf_to_dev(pf), "ne6x config busy, timeout!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + if (!num_vfs) { + set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + if (!pci_vfs_assigned(pdev)) { + ne6x_free_vfs(pf); + ne6x_disable_mailbox_irq(pf); + ne6x_free_mailbox_irq(pf); + ne6x_mbx_deinit_snapshot(&pf->hw); + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + if (!test_bit(NE6X_REMOVE, pf->state)) { + ne6x_recycle_vp_resources(pf); + err = ne6x_adpt_resetup(pf, true); + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + if (err) + goto err_recovery; + + return 0; + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return -EBUSY; + } + + status = ne6x_mbx_init_snapshot(&pf->hw, num_vfs); + if (status) + return ne6x_status_to_errno(status); + + err = ne6x_pci_sriov_ena(pf, num_vfs); + if (err) { + ne6x_mbx_deinit_snapshot(&pf->hw); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; + } + + PBMP_CLEAR(port_bitmap); + + /* config vport, default vlan */ + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + /* config default vlan */ + PBMP_PORT_ADD(port_bitmap, adpt->vport); + ne6x_dev_set_vport(adpt); + adpt->hw_feature = ne6x_dev_get_features(adpt); + } + + err = pci_enable_sriov(pf->pdev, num_vfs); + if (err) + goto err_hanler; + + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return num_vfs; + +err_hanler: + ne6x_dev_clear_vport(pf); + /* config vport, default vlan */ + ne6x_for_each_pf(pf, vf_id) { + adpt = pf->adpt[vf_id]; + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + ne6x_dev_set_vport(adpt); + } + + if (!pci_vfs_assigned(pdev)) { + ne6x_mbx_deinit_snapshot(&pf->hw); + ne6x_free_vfs(pf); + pf->num_alloc_vfs = 0; + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + } + +err_recovery: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; +} + +static int ne6x_validate_vf_id(struct ne6x_pf *pf, u16 vf_id) +{ + /* vf_id range is only valid for 0-255, and should always be unsigned */ + if (vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + return 0; +} + +static int ne6x_validate_outer_vf_id(struct ne6x_pf *pf, u16 out_vf_id) +{ + if (out_vf_id >= (pf->num_alloc_vfs / pf->num_alloc_adpt)) + return -EINVAL; + + return 0; +} + +static int ne6x_sdk_send_msg_to_vf(struct ne6x_hw *hw, u16 vfid, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_pf *pf = hw->back; + struct ne6x_vf *vf = &pf->vf[vfid]; + int timeout = 2000; + int i; + + usnap.snap.state = v_retval; + usnap.snap.len = msglen; + usnap.snap.type = v_opcode; + + for (i = 0; i < msglen && i < 6; i++) + usnap.snap.data[i] = msg[i]; + + while (!(pf->hw.ne6x_mbx_ready_to_send[vfid])) { + usleep_range(100, 200); + timeout--; + if (!timeout) + break; + } + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), usnap.val); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_INT_REQ), (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_TRAVERSE; + pf->hw.ne6x_mbx_ready_to_send[vfid] = false; + + return 0; +} + +static int ne6x_vc_send_msg_to_vf(struct ne6x_vf *vf, u32 v_opcode, + enum virtchnl_status_code 
v_retval, + u8 *msg, u16 msglen) +{ + struct device *dev; + struct ne6x_pf *pf; + int aq_ret; + + if (!vf) + return -EINVAL; + + pf = vf->pf; + dev = ne6x_pf_to_dev(pf); + + if (ne6x_validate_vf_id(pf, vf->vf_id)) { + dev_err(dev, "vf id[%d] is invalid\n", vf->vf_id); + return -EINVAL; + } + + /* single place to detect unsuccessful return values */ + if (v_retval) + dev_info(dev, "VF %d failed opcode %s, retval: %s\n", vf->vf_id, + ne6x_opcode_str(v_opcode), ne6x_mbox_status_str(v_retval)); + + aq_ret = ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen); + if (aq_ret) { + dev_info(dev, "Unable to send the message to VF %d aq_err %d\n", vf->vf_id, aq_ret); + return -EIO; + } + + return 0; +} + +static int ne6x_check_vf_init(struct ne6x_pf *pf, struct ne6x_vf *vf) +{ + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(ne6x_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", vf->vf_id); + return -EBUSY; + } + + return 0; +} + +static int ne6x_vc_add_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + struct device *dev = ne6x_pf_to_dev(vf->pf); + u8 *mac_addr = vc_ether_addr->addr; + + if (!is_unicast_ether_addr(mac_addr)) { + dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return -EPERM; + } + + if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) { + dev_err(dev, "vf already use the same addr\n"); + return -EPERM; + } + + ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); + ne6x_adpt_add_mac(adpt, mac_addr, true); + + return 0; +} + +static int ne6x_vc_del_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 *mac) +{ + return ne6x_adpt_del_mac(adpt, mac, true); +} + +static int ne6x_vc_get_vf_res_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *vfres = NULL; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + int len, ret; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + + len = sizeof(union u_ne6x_mbx_snap_buffer_data); + vfres = kzalloc(len, GFP_KERNEL); + if (!vfres) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + vfres->snap.type = VIRTCHNL_OP_GET_VF_RESOURCES; + vfres->snap.data[0] = vf->vf_id; /* vport */ + vfres->snap.data[1] = pf_adpt->port_info->lport; /* lport */ + vfres->snap.data[2] = pf_adpt->port_info->hw_port_id; /* pport */ + vfres->snap.data[3] = pf_adpt->port_info->hw_queue_base; /* base_queue */ + vfres->snap.data[4] = pf->num_qps_per_vf; /* num_qps_per_vf */ + vfres->snap.data[5] = pf->num_alloc_vfs / pf->num_alloc_adpt; /* num vfs of per hw_port */ + vfres->snap.len = 6; + vf->ready = 0; + vf->adpt->port_info->phy.link_info.link_info = 0; + vf->ready_to_link_notify = 0; + set_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + vfres->snap.state = v_ret; + 
ret = ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, + vfres->snap.state, + (u8 *)vfres->snap.data, + vfres->snap.len); + + return ret; +} + +static int ne6x_vc_add_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_add_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_add_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_del_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_del_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_del_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_handle_mac_addr_msg(struct ne6x_vf *vf, u8 *msg, bool set) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *virtchnl_ether_addr); + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *usnap; + struct virtchnl_ether_addr eth_addr; + enum virtchnl_ops vc_op; + struct ne6x_adapter *adpt; + u8 *mac_addr; + int result; + + if (set) { + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_add_mac_addr; + } else { + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_del_mac_addr; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mac_addr = usnap->snap.data; + + if (is_broadcast_ether_addr(mac_addr) || is_zero_ether_addr(mac_addr)) + goto handle_mac_exit; + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + goto handle_mac_exit; + + ether_addr_copy(eth_addr.addr, mac_addr); + result = ne6x_vc_cfg_mac(vf, adpt, ð_addr); + if (result == -EEXIST || result == -ENOENT) { + goto handle_mac_exit; + } else if (result) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto handle_mac_exit; + } + +handle_mac_exit: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); +} + +static int ne6x_vc_add_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, true); +} + +static int ne6x_vc_del_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, false); +} + +static int ne6x_vf_set_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d enable promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? 
"unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, true); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, true); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode off VF-%u mac: %d, trunk: 0x%x, failed, error: %d\n", + vf->vf_id, 0, adpt->port_info->hw_trunk_id, status); + return status; + } + + return 0; +} + +static int ne6x_vf_clear_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d clear promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? "unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, false); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, false); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +static int ne6x_vc_cfg_promiscuous_mode_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)usnap->snap.data; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + bool alluni = false, allmulti = false; + int ucast_err = 0, mcast_err = 0; + struct ne6x_pf *pf = vf->pf; + u8 mcast_m, ucast_m; + struct ne6x_adapter *adpt; + struct device *dev; + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + dev = ne6x_pf_to_dev(pf); + + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + + if (info->flags & FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + + mcast_m = NE6X_MCAST_PROMISC_BITS; + ucast_m = NE6X_UCAST_PROMISC_BITS; + + if (alluni) + ucast_err = ne6x_vf_set_adpt_promisc(vf, adpt, ucast_m); + else + ucast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, ucast_m); + + if (allmulti) + mcast_err = ne6x_vf_set_adpt_promisc(vf, adpt, mcast_m); + else + mcast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, mcast_m); + + if (!mcast_err) { + if (allmulti && !test_and_set_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", + vf->vf_id); + else if (!allmulti && test_and_clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", + vf->vf_id); + } + + if (!ucast_err) { + if (alluni && !test_and_set_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", + vf->vf_id); + else if (!alluni && test_and_clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", + vf->vf_id); + } + +error_param: + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, v_ret, NULL, 0); +} + +static bool ne6x_is_vf_link_up(struct ne6x_vf *vf) +{ + struct ne6x_port_info *pi = ne6x_vf_get_port_info(vf); + struct ne6x_pf *pf = vf->pf; + + if (ne6x_check_vf_init(pf, vf)) + return false; + + if (vf->link_forced) + return vf->link_up; + else + return pi->phy.link_info.link_info & 
NE6X_AQ_LINK_UP; +} + +static u32 ne6x_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + switch (link_speed) { + case NE6X_LINK_SPEED_10GB: + speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + speed = NE6X_LINK_SPEED_100GB; + break; + default: + speed = NE6X_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} + +static void ne6x_set_pfe_link(struct ne6x_vf *vf, struct virtchnl_pf_event *pfe, + int ne6x_link_speed, bool link_up) +{ + pfe->link_status = link_up; + /* Speed in Mbps */ + if (link_up && vf->link_forced) + ne6x_link_speed = NE6X_LINK_SPEED_25GB; + + pfe->link_speed = ne6x_conv_link_speed_to_virtchnl(true, ne6x_link_speed); +} + +static void ne6x_vc_notify_vf_link_state(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_port_info *pi; + u8 data[6] = {0}; + + pi = ne6x_vf_get_port_info(vf); + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + + if (ne6x_is_vf_link_up(vf)) + ne6x_set_pfe_link(vf, &pfe, pi->phy.link_info.link_speed, true); + else + ne6x_set_pfe_link(vf, &pfe, NE6X_LINK_SPEED_UNKNOWN, false); + + data[0] = pfe.event; + data[1] = (pfe.link_speed >> 24) & 0xff; + data[2] = (pfe.link_speed >> 16) & 0xff; + data[3] = (pfe.link_speed >> 8) & 0xff; + data[4] = (pfe.link_speed >> 0) & 0xff; + data[5] = pfe.link_status; + + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 6); +} + +void ne6x_vc_notify_link_state(struct ne6x_vf *vf) +{ + if (vf->ready_to_link_notify) + ne6x_vc_notify_vf_link_state(vf); +} + +static void ne6x_vc_notify_vf_reset(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe; + struct ne6x_pf *pf; + u8 data[6] = {0}; + + if (!vf) + return; + + pf = vf->pf; + if (ne6x_validate_vf_id(pf, vf->vf_id)) + return; + + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(NE6X_VF_STATE_INIT, vf->vf_states) && + !test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(NE6X_VF_STATE_DIS, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + data[0] = pfe.event; + ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 1); +} + +static void ne6x_vc_notify_vf_trust_change(struct ne6x_vf *vf) +{ + struct virtchnl_vf_config vfconfig = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u8 data[6] = {0}; + + dev = ne6x_pf_to_dev(pf); + vfconfig.type = VIRTCHNL_VF_CONFIG_TRUST; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + vfconfig.data[0] = 1; + else + vfconfig.data[0] = 0; + + data[0] = vfconfig.type; + data[1] = vfconfig.data[0]; + dev_info(dev, "vfconfig_type = %d,data = %d\n", data[0], data[1]); + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_VF_CONFIG, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 2); +} + +static bool ne6x_reset_vf(struct ne6x_vf *vf, bool is_vflr) +{ + struct ne6x_adapter *adpt; + + adpt = ne6x_get_vf_adpt(vf); + + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + ne6x_dis_vf_qs(vf); + + if (test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + clear_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + adpt->port_info->phy.link_info.link_info = 0x0; + if (is_vflr) + vf->rx_tx_state = false; + } + + if 
(test_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states); + + if (test_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states); + + return 0; +} + +static void ne6x_vc_reset_vf(struct ne6x_vf *vf, bool update_tx_rx) +{ + ne6x_vc_notify_vf_reset(vf); + ne6x_reset_vf(vf, update_tx_rx); +} + +static int ne6x_vc_request_qs_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + u16 req_queues = (usnap->snap.data[1] << 8) | usnap->snap.data[0]; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u16 max_avail_vf_qps, max_allowed_vf_qps; + u8 req_reset = usnap->snap.data[2]; + bool need_update_rx_tx = false; + struct ne6x_pf *pf = vf->pf; + u16 tx_rx_queue_left; + u16 num_queue_pairs; + struct device *dev; + u16 cur_queues; + + ne6x_clear_vf_status(vf); + dev = ne6x_pf_to_dev(pf); + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + max_allowed_vf_qps = pf->num_qps_per_vf; + cur_queues = vf->num_vf_qs; + tx_rx_queue_left = cur_queues; + max_avail_vf_qps = tx_rx_queue_left + cur_queues; + + if (!req_queues) { + dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", vf->vf_id); + } else if (req_queues > max_allowed_vf_qps) { + dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, + max_allowed_vf_qps); + num_queue_pairs = max_allowed_vf_qps; + } else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, + req_queues - cur_queues, tx_rx_queue_left); + num_queue_pairs = min_t(u16, max_avail_vf_qps, max_allowed_vf_qps); + } else { + if (req_queues != vf->num_req_qs) { + vf->num_req_qs = req_queues; + need_update_rx_tx = true; + } + if (req_reset) { + ne6x_vc_reset_vf(vf, need_update_rx_tx); + } else { + vf->ready = false; + if (need_update_rx_tx) + vf->rx_tx_state = false; + + vf->adpt->port_info->phy.link_info.link_info = 0x0; + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); + } + + return 0; + } + +error_param: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, v_ret, (u8 *)&num_queue_pairs, + 2); +} + +static int ne6x_vc_config_mtu_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u16 *mtu; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mtu = (u16 *)(rsvsnap->snap.data); + + dev = ne6x_pf_to_dev(pf); + dev_info(dev, "%s: mtu = %d\n", __func__, *mtu); + ne6x_dev_set_mtu(adpt, *mtu); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_MTU, v_ret, NULL, 0); +} + +struct virtchnl_vlan_info { + u16 vlan_id; + s16 flags; +}; + +static int ne6x_vc_config_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_vlan_info *dpdk_vlan; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + struct ne6x_vlan vlan; + int ret; + + dev = ne6x_pf_to_dev(pf); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + dpdk_vlan = (struct virtchnl_vlan_info *)rsvsnap->snap.data; + if (dpdk_vlan->flags) { + 
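+		/* non-zero flags request adding the VLAN filter; zero flags (else branch below) request its removal */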
dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) { + dev_info(dev, "%s: add vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + dev_info(dev, "%s: add vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + } else { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) { + dev_info(dev, "%s: del vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + dev_info(dev, "%s: del vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } + } + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN, v_ret, NULL, 0); +} + +#define ETH_VLAN_STRIP_MASK 0x0001 +#define ETH_VLAN_FILTER_MASK 0x0002 +#define ETH_QINQ_STRIP_MASK 0x0008 +#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001 +#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020 +#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200 + +struct virtchnl_vlan_offload_info { + u16 mask; + u16 feature; +}; + +static int ne6x_vc_config_vlan_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_vlan_offload_info *offload; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt->hw_feature = ne6x_dev_get_features(adpt); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + offload = (struct virtchnl_vlan_offload_info *)rsvsnap->snap.data; + + if (offload->mask & ETH_VLAN_FILTER_MASK) { + dev_info(dev, "%s: ETH_VLAN_FILTER_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_FILTER) { + dev_info(dev, "%s: ETH_VLAN_FILTER ON\n", __func__); + adpt->hw_feature |= (NE6X_F_RX_VLAN_FILTER); + } else { + dev_info(dev, "%s: ETH_VLAN_FILTER OFF\n", __func__); + adpt->hw_feature &= ~(NE6X_F_RX_VLAN_FILTER); + } + } + + if (offload->mask & ETH_VLAN_STRIP_MASK) { + dev_info(dev, "%s: ETH_VLAN_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_STRIP) { + dev_info(dev, "%s: ETH_VLAN_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_VLAN_STRIP; + } else { + dev_info(dev, "%s: ETH_VLAN_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + } + } + + if (offload->mask & ETH_QINQ_STRIP_MASK) { + dev_info(dev, "%s: ETH_QINQ_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_QINQ_STRIP) { + dev_info(dev, "%s: ETH_QINQ_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_QINQ_STRIP; + } else { + dev_info(dev, "%s: ETH_QINQ_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + } + } + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD, v_ret, NULL, 0); +} + +struct virtchnl_flow_ctrl_info { + u16 mode; + u16 high_water; +}; + +enum rte_eth_fc_mode { + RTE_FC_NONE = 0, /**< Disable flow control. */ + RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */ + RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */ + RTE_FC_FULL /**< Enable flow control on both side. 
*/ +}; + +static int ne6x_vc_config_flow_ctrl_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_flow_ctrl_info *flow; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_flowctrl flowctrl; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + int ret; + + dev = ne6x_pf_to_dev(pf); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + flow = (struct virtchnl_flow_ctrl_info *)rsvsnap->snap.data; + if (flow->mode == RTE_FC_FULL) { + flowctrl.rx_pause = 1; + flowctrl.tx_pause = 1; + } else if (flow->mode == RTE_FC_RX_PAUSE) { + flowctrl.rx_pause = 1; + } else if (flow->mode == RTE_FC_TX_PAUSE) { + flowctrl.tx_pause = 1; + } else { + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + } + + dev_info(dev, "%s: mode = %d high water = %d\n", __func__, flow->mode, flow->high_water); + ret = ne6x_dev_set_flowctrl(adpt, &flowctrl); + if (ret) { + dev_info(dev, "%s: set flow ctrl failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } + + ret = ne6x_dev_set_vf_bw(adpt, flow->high_water); + if (ret) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_FLOW_CTRL, v_ret, NULL, 0); +} + +static int ne6x_vc_config_rss_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + u8 *data = (u8 *)&adpt->rss_info; + int i; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + for (i = 0; i < rsvsnap->snap.len; i++) { + data[adpt->rss_size] = rsvsnap->snap.data[i]; + adpt->rss_size++; + } + + if (adpt->rss_size >= sizeof(struct ne6x_rss_info)) { + adpt->rss_size = 0; + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS, v_ret, NULL, 0); +} + +static int ne6x_vc_changed_rss_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + int i, ret; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + memcpy(&adpt->num_queue, rsvsnap->snap.data, sizeof(adpt->num_queue)); + + if (adpt->rss_info.ind_table_size > NE6X_RSS_MAX_IND_TABLE_SIZE) + adpt->rss_info.ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue); + + ret = ne6x_dev_set_rss(adpt, &adpt->rss_info); + ret |= ne6x_dev_add_unicast_for_fastmode(adpt, vf->dev_lan_addr.addr); + ret |= ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CHANGED_RSS, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); + + return ret; +} + +static int ne6x_vc_add_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_vlan vlan; + u16 vlan_tpid = 0; + u16 vlan_id = 0; + + vlan_id = *((u16 *)msg); + vlan_tpid = *((u16 *)(msg + 2)); + dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", + __func__, vlan_tpid, vlan_id); + + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + dev_info(&vf->pf->pdev->dev, "%s:vfp_vid %04x\n", __func__, vf->vfp_vid); + + ne6x_adpt_add_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_del_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_vlan vlan; + u16 vlan_tpid = 0; + u16 vlan_id = 0; + + vlan_id = *((u16 *)msg); + vlan_tpid = *((u16 *)(msg + 2)); + + dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", __func__, vlan_tpid, + 
vlan_id); + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + ne6x_adpt_del_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_config_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = rsvsnap->snap.data[3]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[2]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[1]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[0]; + + if (vf->tx_rate) + adpt->hw_feature |= NE6X_F_TX_QOSBANDWIDTH; + else + adpt->hw_feature &= ~NE6X_F_TX_QOSBANDWIDTH; + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_OFFLOAD, VIRTCHNL_STATUS_SUCCESS, NULL, + 0); +} + +static int ne6x_vc_request_feature_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = ne6x_dev_get_features(adpt); + dev_info(&vf->pf->pdev->dev, "%s: vf->vf_id =%d vport = %d lport = %d pport = %d hw_queue_base = %d hw_feature = %08X\n", + __func__, vf->vf_id, adpt->vport, adpt->port_info->lport, + adpt->port_info->hw_port_id, adpt->port_info->hw_queue_base, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_FEATURE, VIRTCHNL_STATUS_SUCCESS, + (u8 *)&adpt->hw_feature, sizeof(u32)); +} + +static int ne6x_vc_reset_vf_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + + vf->ready = false; + vf->rx_tx_state = 0; + vf->adpt->port_info->phy.link_info.link_info = false; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + + vf->port_vlan_info = NE6X_VLAN(0, 0, 0); + vf->link_forced = false; + vf->trusted = false; + vf->tx_rate = 0; + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_RESET_VF, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_get_logic_vf_id(struct net_device *netdev, int vf_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + + return (adpt->idx * (pf->num_alloc_vfs / pf->num_alloc_adpt) + vf_id); +} + +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_vf *vf; + int logic_vf_id; + int ret = 0; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + if (logic_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + + netdev_info(netdev, "set vf-%d trust %s\n", vf_id, trusted ? 
"on" : "off"); + + if (!vf) { + netdev_err(netdev, "vf is NULL\n"); + return -EINVAL; + } + + /* Check if already ready ?*/ + if (!vf->ready) { + netdev_err(netdev, "vf is not ready\n"); + return (-1); + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + + if (vf->trusted) { + set_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + } else { + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vf_clear_adpt_promisc(vf, ne6x_get_vf_adpt(vf), + NE6X_UCAST_PROMISC_BITS | + NE6X_MCAST_PROMISC_BITS); + } + + ne6x_vc_notify_vf_trust_change(vf); + dev_info(ne6x_pf_to_dev(pf), "VF %u is now %strusted\n", + logic_vf_id, trusted ? "" : "un"); + + return 0; +} + +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret = 0, logic_vf_id; + struct ne6x_vf *vf; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + if (!vf) + return -EINVAL; + + netdev_info(netdev, "set vf-%d link state %s\n", vf_id, + link_state == IFLA_VF_LINK_STATE_ENABLE + ? "enable" + : (link_state == IFLA_VF_LINK_STATE_DISABLE ? "disable" : "auto")); + + /* Check if already ready ?*/ + if (!vf->ready) + return (-1); + + if (!vf->trusted) + return (-1); + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + ret = -EINVAL; + goto out_put_vf; + } + + ne6x_vc_notify_vf_link_state(vf); + +out_put_vf: + return ret; +} + +static int ne6x_vc_modify_vf_mac(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + if (!pf->adpt) + dev_info(ne6x_pf_to_dev(pf), "adpt is null vf %d\n", vf->vf_id); + + /* set zero addr mean clear mac */ + if (is_zero_ether_addr(vc_ether_addr.addr)) + return ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + + if (is_valid_ether_addr(vf->dev_lan_addr.addr)) { + ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + memset(vf->dev_lan_addr.addr, 0, 6); + } + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + +err: + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_VF_ADDR, v_ret, vc_ether_addr.addr, 6); +} + +static int ne6x_vc_set_fast_mode(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_pf *pf = vf->pf; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + if (rsvsnap->snap.data[0]) { + vf->adpt->num_queue = rsvsnap->snap.data[1]; + v_ret = ne6x_dev_set_fast_mode(pf, true, 
vf->adpt->num_queue); + } else { + v_ret = ne6x_dev_set_fast_mode(pf, false, 0); + } + + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_FAST_MDOE, v_ret, NULL, 0); +} + +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_hw *hw = &pf->hw; + struct ne6x_vf *vf = NULL; + struct ne6x_vlan vlan; + struct device *dev; + int err = 0; + int i; + + dev = ne6x_pf_to_dev(pf); + ne6x_for_each_vf(pf, i) { + if (pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i]) { + vf = &pf->vf[i]; + usnap.val = rd64_bar4(hw, NE6X_VF_MAILBOX_ADDR(vf->base_queue)); + WARN(usnap.snap.len > 6, ">>>>>>>>>>>>>>>>>>recv VF mailbox error!!!<<<<<<<<<<<<<<<<<<<"); + switch (usnap.snap.type) { + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ne6x_vc_get_vf_res_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + vf->ready = 1; + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ne6x_vc_add_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ne6x_vc_del_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_ADD_VLAN: + err = ne6x_vc_add_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ne6x_vc_del_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ne6x_vc_cfg_promiscuous_mode_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_EVENT: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ne6x_vc_request_qs_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_RSS: + err = ne6x_vc_config_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN: + err = ne6x_vc_config_vlan_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + err = ne6x_vc_config_vlan_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_MTU: + err = ne6x_vc_config_mtu_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + err = ne6x_vc_config_flow_ctrl_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CHANGED_RSS: + err = ne6x_vc_changed_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + err = ne6x_vc_config_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + err = ne6x_vc_request_feature_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_RESET_VF: + err = ne6x_vc_reset_vf_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_PORT_STATUS: + ne6x_dev_add_broadcast_leaf(ne6x_get_vf_adpt(vf)); + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(ne6x_get_vf_adpt(vf), vlan); + ne6x_vc_notify_vf_link_state(vf); + + if (!vf->ready_to_link_notify) + vf->ready_to_link_notify = 1; + + ne6x_linkscan_schedule(pf); + break; + case VIRTCHNL_OP_SET_VF_ADDR: + err = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_SET_FAST_MDOE: + err = ne6x_vc_set_fast_mode(vf, (u8 *)&usnap); + break; + /* VIRTCHNL_OP_VERSION not used */ + default: + dev_err(dev, "Unsupported opcode %s from VF %d\n", + ne6x_opcode_str(usnap.snap.type), i); + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); + break; + } + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = false; + } + 
if (err) + /* Helper function cares less about error return values here + * as it is busy with pending work. + */ + dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", i, + usnap.snap.type, err); + } + + if (test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state)) + clear_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); +} + +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid = 0; + int ret = 0; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + /* first adpt is always the LAN adpt */ + adpt = pf->adpt[vf->lan_adpt_idx]; + if (!adpt) { + ret = -ENOENT; + goto error_param; + } + + ivi->vf = vf_id; + + ether_addr_copy(ivi->mac, vf->dev_lan_addr.addr); + + ivi->vlan = vf->port_vlan_info.vid; + ivi->qos = vf->port_vlan_info.prio; + if (vf->port_vlan_info.vid) + ivi->vlan_proto = cpu_to_be16(vf->port_vlan_info.tpid); + + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + ivi->trusted = 1; + else + ivi->trusted = 0; + +error_param: + return ret; +} + +static void ne6x_calc_token_for_bw(int max_tx_rate, int *time_inv, int *tocken) +{ + if (max_tx_rate <= 100) { + *time_inv = 3910; + *tocken = max_tx_rate; + } else if (max_tx_rate <= 1000) { + *time_inv = 790; + *tocken = max_tx_rate / 5; + } else if (max_tx_rate < 5000) { + *time_inv = 395; + *tocken = max_tx_rate / 10; + } else if (max_tx_rate < 10000) { + *time_inv = 118; + *tocken = max_tx_rate / 33; + } else { + *time_inv = 39; + *tocken = max_tx_rate / 100; + } +} + +static int ne6x_set_vf_bw_for_max_vpnum(struct ne6x_pf *pf, int vf_id, int max_tx_rate) +{ + union ne6x_sq_meter_cfg0 sq_meter_cfg0; + union ne6x_sq_meter_cfg1 sq_meter_cfg1; + union ne6x_sq_meter_cfg2 sq_meter_cfg2; + union ne6x_sq_meter_cfg3 sq_meter_cfg3; + struct ne6x_hw *hw = &pf->hw; + int time_inv = 0; + int tocken = 0; + + sq_meter_cfg3.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3)); + sq_meter_cfg3.reg.csr_meter_pause_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3), sq_meter_cfg3.val); + sq_meter_cfg2.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2)); + sq_meter_cfg2.reg.csr_meter_resume_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2), sq_meter_cfg2.val); + + sq_meter_cfg1.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1)); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = max_tx_rate; + + if (max_tx_rate) { + ne6x_calc_token_for_bw(max_tx_rate, &time_inv, &tocken); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = tocken; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = time_inv; + } else { + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = 0x1; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = 0x1; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1), sq_meter_cfg1.val); + sq_meter_cfg0.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0)); + sq_meter_cfg0.reg.csr_meter_pkt_token_num_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_ipg_len_vp = 0x0; + 
sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_packet_mode_vp = 0x0; + + if (max_tx_rate) { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + } else { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x0; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x0; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0), sq_meter_cfg0.val); + + return 0; +} + +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf) +{ + int index; + + for (index = 0; index < NE6X_MAX_VP_NUM; index++) + ne6x_set_vf_bw_for_max_vpnum(pf, index, 0); +} + +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = np->adpt->back; + struct ne6x_adapter *adpt; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + ret = -EINVAL; + goto error; + } + + ret = ne6x_validata_tx_rate(adpt, logic_vfid, min_tx_rate, max_tx_rate); + if (ret) { + ret = -EINVAL; + goto error; + } + + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", logic_vfid); + ret = -EAGAIN; + goto error; + } + + if (pf->num_alloc_vfs == 64) + ret = ne6x_set_vf_bw_for_max_vpnum(pf, logic_vfid, max_tx_rate); + else + ret = ne6x_dev_set_vf_bw(adpt, max_tx_rate); + + if (ret) + goto error; + + vf->tx_rate = max_tx_rate; + + return 0; +error: + return ret; +} + +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + + adpt = ne6x_get_vf_adpt(vf); + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac)) { + dev_err(&pf->pdev->dev, "already use the same Ethernet address %pM for VF %d\n", + mac, vf_id); + goto error_param; + } + + /*simluate a msg from vf*/ + usnap.snap.type = VIRTCHNL_OP_SET_VF_ADDR; + usnap.snap.state = VIRTCHNL_STATUS_SUCCESS; + usnap.snap.len = 6; + memcpy(usnap.snap.data, mac, usnap.snap.len); + ret = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + +error_param: + return ret; +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h new file mode 100644 index 00000000000000..2f094d164fe3f7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_VIRTCHNL_PF_H +#define _NE6X_VIRTCHNL_PF_H + +#include "mailbox.h" + +#define NE6X_NO_ADPT 0xffff + +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + VIRTCHNL_EVENT_DCF_ADPT_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u32 link_speed; + u8 link_status; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +/* Specific VF states */ +enum ne6x_vf_states { + NE6X_VF_STATE_INIT = 0, /* PF is initializing VF */ + NE6X_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + NE6X_VF_STATE_QS_ENA, /* VF queue(s) enabled */ + NE6X_VF_STATE_DIS, + NE6X_VF_STATE_MC_PROMISC, + NE6X_VF_STATE_UC_PROMISC, + NE6X_VF_STATES_NBITS +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; +}; + +struct virtchnl_promisc_info { + u16 adpt_id; + u16 flags; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +enum ne6x_promisc_flags { + NE6X_PROMISC_UCAST_RX = 0x1, + NE6X_PROMISC_UCAST_TX = 0x2, + NE6X_PROMISC_MCAST_RX = 0x4, + NE6X_PROMISC_MCAST_TX = 0x8, + NE6X_PROMISC_BCAST_RX = 0x10, + NE6X_PROMISC_BCAST_TX = 0x20, + NE6X_PROMISC_VLAN_RX = 0x40, + NE6X_PROMISC_VLAN_TX = 0x80, +}; + +#define NE6X_UCAST_PROMISC_BITS (NE6X_PROMISC_UCAST_TX | NE6X_PROMISC_UCAST_RX) +#define NE6X_MCAST_PROMISC_BITS (NE6X_PROMISC_MCAST_TX | NE6X_PROMISC_MCAST_RX) + +enum ne6x_vf_config_flag { + NE6X_VF_CONFIG_FLAG_TRUSTED = 0, + NE6X_VF_CONFIG_FLAG_LINK_FORCED, + NE6X_VF_CONFIG_FLAG_NBITS /* must be last */ +}; + +struct ne6x_key { + u8 rsv0; + u8 pi; + u8 mac_addr[6]; + u8 rsv1[56]; +}; + +/* VF information structure */ +struct ne6x_vf { + struct ne6x_pf *pf; + struct ne6x_adapter *adpt; + + u16 vf_id; /* VF ID in the PF space */ + u16 lan_adpt_idx; /* index into PF struct */ + /* first vector index of this VF in the PF space */ + u16 vfp_vid; + u16 vfp_tpid; + int tx_rate; + u8 rx_tx_state; + bool ready; + bool ready_to_link_notify; + + u16 base_queue; + u16 num_vf_qs; + u16 num_req_qs; + + struct ne6x_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */ + + u8 trusted : 1; + u8 link_forced : 1; + u8 link_up : 1; /* only valid if VF link is forced */ + + struct virtchnl_ether_addr dev_lan_addr; + DECLARE_BITMAP(vf_states, NE6X_VF_STATES_NBITS); /* VF runtime states */ + DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + DECLARE_BITMAP(vf_config_flag, NE6X_VF_CONFIG_FLAG_NBITS); +}; + +#define ne6x_for_each_vf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) +#define ne6x_for_each_pf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_adpt; (i)++) + +#ifdef CONFIG_PCI_IOV +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs); +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf); +void ne6x_vc_notify_link_state(struct ne6x_vf *vf); +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf); + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf); +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); + +#else /* CONFIG_PCI_IOV */ +static inline int ne6x_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused 
num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, + int min_tx_rate, int max_tx_rate) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_PCI_IOV */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h new file mode 100644 index 00000000000000..3c5bade3983653 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h @@ -0,0 +1,551 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_H +#define _NE6XVF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "common.h" +#include "feature.h" +#include "txrx.h" +#include "mailbox.h" +#include "ne6xvf_virtchnl.h" + +#define NE6XVF_MAX_AQ_BUF_SIZE 4096 +#define NE6XVF_AQ_LEN 32 +#define NE6XVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ + +#define NE6XVF_REG_ADDR(_VPID, _OFST) (((_VPID) << 12) + ((_OFST) << 4)) + +#define NE6XVF_DB_STATE 0x1a +#define NE6XVF_MAILBOX_DATA 0x19 +#define NE6XVF_PF_MAILBOX_DATA 0x18 + +#define NE6XVF_QC_TAIL1(_Q) (((_Q) << 12) | (NE6X_CQ_HD_POINTER << 4)) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QTX_TAIL1(_Q) (((_Q) << 12) | (0 << 11) | 0) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QRX_TAIL1(_Q) (((_Q) << 12) | (1 << 11) | 0) /* _i=0...15 Reset: PFR */ + +#define ne6xvf_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ +} while (0) + +#define hw_dbg(h, s, ...) 
\ + pr_debug("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__) + +extern char ne6xvf_driver_name[]; +extern const char ne6xvf_driver_version[]; +extern struct workqueue_struct *ne6xvf_wq; + +#define ne6xvf_init_spinlock(_sp) ne6xvf_init_spinlock_d(_sp) +#define ne6xvf_acquire_spinlock(_sp) ne6xvf_acquire_spinlock_d(_sp) +#define ne6xvf_release_spinlock(_sp) ne6xvf_release_spinlock_d(_sp) +#define ne6xvf_destroy_spinlock(_sp) ne6xvf_destroy_spinlock_d(_sp) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr0 + (reg))) +#define rd64(a, reg) readq((a)->hw_addr0 + (reg)) + +#define NE6XVF_READ_REG(hw, reg) rd64(hw, reg) +#define NE6XVF_WRITE_REG(hw, reg, value) wr64(hw, reg, value) + +#define NE6XVF_MAX_REQ_QUEUES 32 + +#define NE6XVF_RESET_WAIT_MS 10 +#define NE6XVF_RESET_WAIT_DETECTED_COUNT 50 +#define NE6XVF_RESET_WAIT_COMPLETE_COUNT 2000 + +enum ne6xvf_critical_section_t { + __NE6XVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __NE6XVF_IN_REMOVE_TASK, /* device being removed */ + __NE6XVF_TX_TSTAMP_IN_PROGRESS, /* PTP Tx timestamp request in progress */ +}; + +struct ne6xvf_vlan_filter { + struct list_head list; + struct ne6x_vf_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + }; +}; + +struct ne6xvf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + struct { + u8 is_new_mac : 1; /* filter is new, wait for PF decision */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 is_primary : 1; /* filter is a default VF MAC */ + u8 add_handled : 1; /* received response from PF for filter add */ + u8 padding : 3; + }; +}; + +/* Driver state. The order of these is important! 
*/ +enum ne6xvf_state_t { + __NE6XVF_STARTUP, /* driver loaded, probe complete */ + __NE6XVF_REMOVE, /* driver is being unloaded */ + __NE6XVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __NE6XVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */ + __NE6XVF_INIT_CONFIG_ADAPTER, + __NE6XVF_INIT_SW, /* got resources, setting up structs */ + __NE6XVF_INIT_FAILED, /* init failed, restarting procedure */ + __NE6XVF_RESETTING, /* in reset */ + __NE6XVF_COMM_FAILED, /* communication with PF failed */ + /* Below here, watchdog is running */ + __NE6XVF_DOWN, /* ready, can be opened */ + __NE6XVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __NE6XVF_TESTING, /* in ethtool self-test */ + __NE6XVF_RUNNING /* opened, working */ +}; + +struct ne6xvf_mac_info { + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum ne6xvf_bus_speed { + ne6xvf_bus_speed_unknown = 0, + ne6xvf_bus_speed_33 = 33, + ne6xvf_bus_speed_66 = 66, + ne6xvf_bus_speed_100 = 100, + ne6xvf_bus_speed_120 = 120, + ne6xvf_bus_speed_133 = 133, + ne6xvf_bus_speed_2500 = 2500, + ne6xvf_bus_speed_5000 = 5000, + ne6xvf_bus_speed_8000 = 8000, + ne6xvf_bus_speed_reserved +}; + +enum ne6xvf_bus_width { + ne6xvf_bus_width_unknown = 0, + ne6xvf_bus_width_pcie_x1 = 1, + ne6xvf_bus_width_pcie_x2 = 2, + ne6xvf_bus_width_pcie_x4 = 4, + ne6xvf_bus_width_pcie_x8 = 8, + ne6xvf_bus_width_32 = 32, + ne6xvf_bus_width_64 = 64, + ne6xvf_bus_width_reserved +}; + +enum ne6xvf_bus_type { + ne6xvf_bus_type_unknown = 0, + ne6xvf_bus_type_pci, + ne6xvf_bus_type_pcix, + ne6xvf_bus_type_pci_express, + ne6xvf_bus_type_reserved +}; + +struct ne6xvf_bus_info { + enum ne6xvf_bus_speed speed; + enum ne6xvf_bus_width width; + enum ne6xvf_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +struct ne6xvf_hw_capabilities { + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; + u32 max_mtu; + u32 chip_id; + u32 mac_id; + u32 lport; + u32 vf_id; + u32 num_vf_per_pf; +}; + +struct ne6xvf_hw { + u8 __iomem *hw_addr0; + u8 __iomem *hw_addr2; + void *back; + + /* subsystem structs */ + struct ne6xvf_mac_info mac; + struct ne6xvf_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct ne6xvf_hw_capabilities dev_caps; + + struct ne6xvf_sdk_mbx_info mbx; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +struct ne6xvf_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +#define NE6XVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define NE6XVF_FLAG_PF_COMMS_FAILED BIT(3) +#define NE6XVF_FLAG_RESET_PENDING BIT(4) +#define NE6XVF_FLAG_RESET_NEEDED BIT(5) +#define NE6XVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define NE6XVF_FLAG_PROMISC_ON BIT(13) +#define NE6XVF_FLAG_ALLMULTI_ON BIT(14) + +#define NE6XVF_FLAG_LEGACY_RX BIT(15) +#define NE6XVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define NE6XVF_FLAG_QUEUES_ENABLED BIT(17) +#define NE6XVF_FLAG_QUEUES_DISABLED BIT(18) +#define NE6XVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define NE6XF_FLAG_REINIT_CHNL_NEEDED 
BIT(21) +#define NE6XF_FLAG_RESET_DETECTED BIT(22) +#define NE6XF_FLAG_INITIAL_MAC_SET BIT(23) + +#define NE6XVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0) +#define NE6XVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2) +#define NE6XVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3) +#define NE6XVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4) +#define NE6XVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5) +#define NE6XVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6) +#define NE6XVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define NE6XVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define NE6XVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define NE6XVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define NE6XVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define NE6XVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define NE6XVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define NE6XVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define NE6XVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define NE6XVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define NE6XVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define NE6XVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) + +#define NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD BIT_ULL(38) +#define NE6XVF_FLAG_AQ_GET_FEATURE BIT_ULL(39) +#define NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS BIT_ULL(40) +#define NE6XVF_FLAG_AQ_SET_VF_MAC BIT_ULL(41) +#define NE6XVF_FLAG_AQ_CHANGED_RSS BIT_ULL(42) + +struct ne6xvf_adapter { + struct ne6x_adapt_comm comm; + struct work_struct sdk_task; + struct delayed_work watchdog_task; + wait_queue_head_t down_waitqueue; + wait_queue_head_t vc_waitqueue; + struct ne6x_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + u16 max_queues; + u16 num_active_queues; + u16 num_req_queues; + u32 hw_feature; + struct ne6x_ring *tg_rings; /* TG */ + struct ne6x_ring *cq_rings; /* CQ */ + u32 cq_desc_count; + + /* TX */ + struct ne6x_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct ne6x_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; + + /* duplicates for common code */ +#define NE6XVF_FLAG_DCB_ENABLED 0 + + /* flags for admin queue service task */ + u64 aq_required; + + /* Lock to prevent possible clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + + netdev_features_t current_netdev_promisc_flags; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct net_device_stats net_stats; + + struct ne6xvf_hw hw; /* defined in ne6xvf.h */ + + enum ne6xvf_state_t state; + enum ne6xvf_state_t last_state; + unsigned long crit_section; + + bool netdev_registered; + bool link_up; + enum ne6x_sdk_link_speed link_speed; + enum virtchnl_ops current_op; + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + + struct ne6xvf_eth_stats current_stats; + //struct ne6xvf_vsi vsi; + u16 msg_enable; + struct ne6x_rss_info rss_info; + u8 trusted; + +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6xvf_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ +}; + +#ifdef CONFIG_DEBUG_FS +#define NCE_DEBUG_CHAR_LEN 1024 + +struct ne6xvf_dbg_cmd_wr { + char command[NCE_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6xvf_adapter *pf); +}; + +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf); +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf); 
+void ne6xvf_dbg_init(void); +void ne6xvf_dbg_exit(void); + +#endif + +/* Error Codes */ +enum ne6xvf_status { + NE6XVF_SUCCESS = 0, + NE6XVF_ERR_NVM = -1, + NE6XVF_ERR_NVM_CHECKSUM = -2, + NE6XVF_ERR_PHY = -3, + NE6XVF_ERR_CONFIG = -4, + NE6XVF_ERR_PARAM = -5, + NE6XVF_ERR_MAC_TYPE = -6, + NE6XVF_ERR_UNKNOWN_PHY = -7, + NE6XVF_ERR_LINK_SETUP = -8, + NE6XVF_ERR_ADAPTER_STOPPED = -9, + NE6XVF_ERR_INVALID_MAC_ADDR = -10, + NE6XVF_ERR_DEVICE_NOT_SUPPORTED = -11, + NE6XVF_ERR_MASTER_REQUESTS_PENDING = -12, + NE6XVF_ERR_INVALID_LINK_SETTINGS = -13, + NE6XVF_ERR_AUTONEG_NOT_COMPLETE = -14, + NE6XVF_ERR_RESET_FAILED = -15, + NE6XVF_ERR_SWFW_SYNC = -16, + NE6XVF_ERR_NO_AVAILABLE_VSI = -17, + NE6XVF_ERR_NO_MEMORY = -18, + NE6XVF_ERR_BAD_PTR = -19, + NE6XVF_ERR_RING_FULL = -20, + NE6XVF_ERR_INVALID_PD_ID = -21, + NE6XVF_ERR_INVALID_QP_ID = -22, + NE6XVF_ERR_INVALID_CQ_ID = -23, + NE6XVF_ERR_INVALID_CEQ_ID = -24, + NE6XVF_ERR_INVALID_AEQ_ID = -25, + NE6XVF_ERR_INVALID_SIZE = -26, + NE6XVF_ERR_INVALID_ARP_INDEX = -27, + NE6XVF_ERR_INVALID_FPM_FUNC_ID = -28, + NE6XVF_ERR_QP_INVALID_MSG_SIZE = -29, + NE6XVF_ERR_QP_TOOMANY_WRS_POSTED = -30, + NE6XVF_ERR_INVALID_FRAG_COUNT = -31, + NE6XVF_ERR_QUEUE_EMPTY = -32, + NE6XVF_ERR_INVALID_ALIGNMENT = -33, + NE6XVF_ERR_FLUSHED_QUEUE = -34, + NE6XVF_ERR_INVALID_PUSH_PAGE_INDEX = -35, + NE6XVF_ERR_INVALID_IMM_DATA_SIZE = -36, + NE6XVF_ERR_TIMEOUT = -37, + NE6XVF_ERR_OPCODE_MISMATCH = -38, + NE6XVF_ERR_CQP_COMPL_ERROR = -39, + NE6XVF_ERR_INVALID_VF_ID = -40, + NE6XVF_ERR_INVALID_HMCFN_ID = -41, + NE6XVF_ERR_BACKING_PAGE_ERROR = -42, + NE6XVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + NE6XVF_ERR_INVALID_PBLE_INDEX = -44, + NE6XVF_ERR_INVALID_SD_INDEX = -45, + NE6XVF_ERR_INVALID_PAGE_DESC_INDEX = -46, + NE6XVF_ERR_INVALID_SD_TYPE = -47, + NE6XVF_ERR_MEMCPY_FAILED = -48, + NE6XVF_ERR_INVALID_HMC_OBJ_INDEX = -49, + NE6XVF_ERR_INVALID_HMC_OBJ_COUNT = -50, + NE6XVF_ERR_INVALID_SRQ_ARM_LIMIT = -51, + NE6XVF_ERR_SRQ_ENABLED = -52, + NE6XVF_ERR_ADMIN_QUEUE_ERROR = -53, + NE6XVF_ERR_ADMIN_QUEUE_TIMEOUT = -54, + NE6XVF_ERR_BUF_TOO_SHORT = -55, + NE6XVF_ERR_ADMIN_QUEUE_FULL = -56, + NE6XVF_ERR_ADMIN_QUEUE_NO_WORK = -57, + NE6XVF_ERR_BAD_IWARP_CQE = -58, + NE6XVF_ERR_NVM_BLANK_MODE = -59, + NE6XVF_ERR_NOT_IMPLEMENTED = -60, + NE6XVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, + NE6XVF_ERR_DIAG_TEST_FAILED = -62, + NE6XVF_ERR_NOT_READY = -63, + NE6XVF_NOT_SUPPORTED = -64, + NE6XVF_ERR_FIRMWARE_API_VERSION = -65, + NE6XVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +static inline const char *ne6xvf_state_str(enum ne6xvf_state_t state) +{ + switch (state) { + case __NE6XVF_STARTUP: + return "__NE6XVF_STARTUP"; + case __NE6XVF_REMOVE: + return "__NE6XVF_REMOVE"; + case __NE6XVF_INIT_GET_RESOURCES: + return "__NE6XVF_INIT_GET_RESOURCES"; + case __NE6XVF_INIT_EXTENDED_CAPS: + return "__NE6XVF_INIT_EXTENDED_CAPS"; + case __NE6XVF_INIT_CONFIG_ADAPTER: + return "__NE6XVF_INIT_CONFIG_ADAPTER"; + case __NE6XVF_INIT_SW: + return "__NE6XVF_INIT_SW"; + case __NE6XVF_INIT_FAILED: + return "__NE6XVF_INIT_FAILED"; + case __NE6XVF_RESETTING: + return "__NE6XVF_RESETTING"; + case __NE6XVF_COMM_FAILED: + return "__NE6XVF_COMM_FAILED"; + case __NE6XVF_DOWN: + return "__NE6XVF_DOWN"; + case __NE6XVF_DOWN_PENDING: + return "__NE6XVF_DOWN_PENDING"; + case __NE6XVF_TESTING: + return "__NE6XVF_TESTING"; + case __NE6XVF_RUNNING: + return "__NE6XVF_RUNNING"; + default: + return "__NE6XVF_UNKNOWN_STATE"; + } +} + +static inline void ne6xvf_change_state(struct ne6xvf_adapter *adapter, enum ne6xvf_state_t state) +{ + if 
(adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init); +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_feature_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter); +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); +void ne6xvf_set_ethtool_ops(struct net_device *netdev); +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter); +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending); +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter); +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter); +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter); + +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter); +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num); +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter); +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll); +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter); +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter); +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len); +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter); +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter); +int ne6xvf_close(struct net_device *netdev); +int ne6xvf_open(struct net_device *netdev); +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter); +void ne6xvf_tail_update(struct ne6x_ring *ring, int val); +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter); + +#endif /* _NE6XVF_H */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c new file mode 100644 index 00000000000000..219b717567fe1e --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include + +#include "ne6xvf.h" + +#ifdef CONFIG_DEBUG_FS +static struct dentry *ne6xvf_dbg_root; + +static void ne6xvf_showqueue(struct ne6xvf_adapter *pf) +{ + struct ne6x_ring *ring; + u64 head, tail, oft; + int i; + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->rx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_OFST)); + dev_info(&pf->pdev->dev, "----RX: Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_alloc, + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->tx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_OFST)); + dev_info(&pf->pdev->dev, "----TX: Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->cq_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER)); + dev_info(&pf->pdev->dev, "----CQ: Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); +} + +static void ne6xvf_showring(struct ne6xvf_adapter *pf) +{ + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + union ne6x_rx_desc *rx_desc; + struct ne6x_ring *ring; + int j, k; + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->rx_rings[j]; + + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], m_len[%d], s_len[%d], s_addr[0x%llx], m_addr[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, + rx_desc->w.vp, + rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, + rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, + rx_desc->wb.u.val, + rx_desc->wb.vp, + rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->tx_rings[j]; + + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_sop_addr) + /* empty descriptor, skp */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, + tx_desc->u.val, + tx_desc->vp, + tx_desc->event_trigger, + tx_desc->chain, + tx_desc->transmit_type, + tx_desc->sop_valid, + tx_desc->eop_valid, + tx_desc->tso, + 
tx_desc->l3_csum, + tx_desc->l3_ofst, + tx_desc->l4_csum, + tx_desc->l4_ofst, + tx_desc->pld_ofst, + tx_desc->mop_cnt, + tx_desc->sop_cnt, + tx_desc->mss, + tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->cq_rings[j]; + + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", + k, + ring->reg_idx, + cq_desc->ctype, + cq_desc->num); + } + } +} + +static const struct ne6xvf_dbg_cmd_wr deg_cmd_wr[] = { + {"queue", ne6xvf_showqueue}, + {"ring", ne6xvf_showring}, +}; + +/** + * nce_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +/** + * ne6xvf_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_write(struct file *filp, const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ne6xvf_adapter *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i, cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NCE_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else { + for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_cmd_wr[i].command, count) == 0) { + deg_cmd_wr[i].command_proc(pf); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6xvf_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6xvf_dbg_command_read, + .write = ne6xvf_dbg_command_write, +}; + +/** + * nce_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + 
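+	/* One directory per PF, named after its PCI device, is hung off the
+	 * driver-level root created in ne6xvf_dbg_init(); the "command" file
+	 * below is the single debug entry point.
+	 */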
pf->ne6xvf_dbg_pf = debugfs_create_dir(name, ne6xvf_dbg_root); + if (!pf->ne6xvf_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6xvf_dbg_pf, pf, + &ne6xvf_dbg_command_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); +} + +/** + * nce_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) +{ + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); + pf->ne6xvf_dbg_pf = NULL; +} + +/** + * nce_dbg_init - start up debugfs for the driver + **/ +void ne6xvf_dbg_init(void) +{ + ne6xvf_dbg_root = debugfs_create_dir(ne6xvf_driver_name, NULL); + if (!ne6xvf_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * nce_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6xvf_dbg_exit(void) +{ + debugfs_remove_recursive(ne6xvf_dbg_root); + ne6xvf_dbg_root = NULL; +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c new file mode 100644 index 00000000000000..9e17aadd565a57 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_ethtool_stats.h" +#include "ne6xvf_txrx.h" + +static const char ne6xvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Link test (on/offline)" +}; + +#define NE6XVF_TEST_LEN (sizeof(ne6xvf_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6xvf_q_stats_len(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6xvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. 
+ */ +#define NE6XVF_NETDEV_STAT(_net_stat) NE6XVF_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* per-queue ring statistics */ +#define NE6XVF_QUEUE_STAT(_name, _stat) NE6XVF_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6xvf_stats ne6xvf_gstrings_tx_queue_stats[] = { + NE6XVF_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6XVF_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6XVF_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6XVF_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_rx_queue_stats[] = { + NE6XVF_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_pg_err", rx_stats.alloc_page_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6XVF_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6XVF_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_cq_queue_stats[] = { + NE6XVF_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6XVF_PORT_MAC_STAT(_name, _stat) NE6XVF_STAT(struct ne6xvf_vsi, _name, _stat) + +#define NE6XVF_ALL_STATS_LEN(n) (ne6xvf_q_stats_len(n)) + +#define ne6xvf_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ + ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_mode) + +static void ne6xvf_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + adapter->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ne6xvf_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void ne6xvf_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ne6xvf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * 
Reports speed/duplex settings based on media_type + */ +static int ne6xvf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + + ks->base.port = PORT_NONE; + if (adapter->link_up) { + /* Set flow control settings */ + ne6xvf_get_settings_link_up(ks, netdev); + } else { + ne6xvf_get_settings_link_down(ks, netdev); + } + + return 0; +} + +/** + * ne6xvf_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ne6xvf_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + return -EOPNOTSUPP; +} + +static void __ne6xvf_add_stat_strings(u8 **p, const struct ne6xvf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6xvf_add_stat_strings(p, stats, ...) \ + __ne6xvf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6xvf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_tx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_rx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_cq_queue_stats, i); + } +} + +static void ne6xvf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6xvf_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6xvf_gstrings_test, NE6XVF_TEST_LEN * ETH_GSTRING_LEN); + default: + break; + } +} + +static int ne6xvf_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + /* The number (and order) of strings reported *must* remain + * constant for a given netdevice. This function must not + * report a different number based on run time parameters + * (such as the number of queues in use, or the setting of + * a private ethtool flag). This is due to the nature of the + * ethtool stats API. + * + * Userspace programs such as ethtool must make 3 separate + * ioctl requests, one for size, one for the strings, and + * finally one for the stats. Since these cross into + * userspace, changes to the number or size could result in + * undefined memory access or incorrect string<->value + * correlations for statistics. + * + * Even if it appears to be safe, changes to the size or + * order of strings will suffer from race conditions and are + * not safe. 
+ */ + return NE6XVF_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6XVF_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ne6xvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + + ne6xvf_update_pf_stats(adapter); + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adapter->num_active_queues; j++) { + tx_ring = &adapter->tx_rings[j]; + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = &adapter->rx_rings[j]; + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = &adapter->cq_rings[j]; + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + rcu_read_unlock(); +} + +static void ne6xvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, ne6xvf_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, ne6xvf_driver_version, sizeof(drvinfo->version)); + strncpy(drvinfo->fw_version, "N/A", 4); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6xvf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) {} + +static void ne6xvf_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6XVF_TEST_LEN); +} + +static int ne6xvf_get_regs_len(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + 
ring->rx_jumbo_pending = 0; +} + +static int ne6xvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count, new_cq_count; + int err; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_rx_count + new_rx_count; + + if (new_tx_count == adapter->tx_desc_count && new_rx_count == adapter->rx_desc_count) + return 0; + + if (!netif_running(adapter->netdev)) { + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to close vf\n"); + return err; + } + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adapter->tx_rings[0].count, adapter->rx_rings[0].count, new_tx_count, + new_rx_count); + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to open vf\n"); + return err; + } + + return 0; +} + +/** + * ne6xvf_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * + * Return tx/rx-pause status + **/ +static void ne6xvf_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + pause->autoneg = 0; + pause->rx_pause = 0; + pause->tx_pause = 0; +} + +/** + * ne6xvf_get_coalesce - get a netdev's coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * + **/ +static int ne6xvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6xvf_get_eeprom_len(struct net_device *netdev) +{ + return 0x64; +} + +static int ne6xvf_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return blink_freq; + case ETHTOOL_ID_ON: + break; + case ETHTOOL_ID_OFF: + break; + case ETHTOOL_ID_INACTIVE: + break; + default: + break; + } + + return 0; +} + +static int ne6xvf_nway_reset(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + data[NE6XVF_ETH_TEST_LINK] = 0; + + /* Offline only tests, not run in online; pass by default */ + data[NE6XVF_ETH_TEST_REG] = 
0; + data[NE6XVF_ETH_TEST_EEPROM] = 0; + data[NE6XVF_ETH_TEST_INTR] = 0; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int ne6xvf_get_rss_hash_opts(struct ne6xvf_adapter *adapter, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6xvf_set_rss_hash_opts(struct ne6xvf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adapter->rss_info.hash_type; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adapter->rss_info.hash_type) + return 0; + + adapter->rss_info.hash_type = rss_flags; + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * ne6xvf_set_rxnfc - command to set Rx flow rules. 
+ * @netdev: network interface device structure
+ * @info: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int ne6xvf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = ne6xvf_set_rss_hash_opts(adapter, info);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * ne6xvf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
+ **/
+static int ne6xvf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_active_queues;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		cmd->data = ne6xvf_get_rss_hash_opts(adapter, cmd->flow_type);
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * ne6xvf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the hash key size.
+ **/
+static u32 ne6xvf_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_info.hash_key_size;
+}
+
+/**
+ * ne6xvf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 ne6xvf_get_rxfh_indir_size(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_info.ind_table_size;
+}
+
+/**
+ * ne6xvf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table from the driver's software copy. Always returns 0.
+ **/
+static int ne6xvf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	u16 i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (key)
+		memcpy(key, adapter->rss_info.hash_key, adapter->rss_info.hash_key_size);
+
+	if (indir) {
+		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
+		for (i = 0; i < adapter->rss_info.ind_table_size; i++)
+			indir[i] = (u32)adapter->rss_info.ind_table[i];
+	}
+
+	return 0;
+}
+
+/**
+ * ne6xvf_set_rxfh - set the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
+ * returns 0 after programming the table.
+ */
+static int ne6xvf_set_rxfh(struct net_device *netdev, const u32 *indir,
+			   const u8 *key, const u8 hfunc)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	if (!key && !indir)
+		return 0;
+
+	if (key)
+		memcpy(&adapter->rss_info.hash_key[0], key, adapter->rss_info.hash_key_size);
+
+	if (indir) {
+		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
+		for (i = 0; i < adapter->rss_info.ind_table_size; i++)
+			adapter->rss_info.ind_table[i] = (u8)(indir[i]);
+	}
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS;
+
+	return 0;
+}
+
+/**
+ * ne6xvf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @channels: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair.
+ **/
+static void ne6xvf_get_channels(struct net_device *netdev, struct ethtool_channels *channels)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	channels->max_rx = 0;
+	channels->max_tx = 0;
+	channels->max_other = 0;
+	channels->max_combined = adapter->max_queues;
+	channels->rx_count = 0;
+	channels->tx_count = 0;
+	channels->other_count = 0;
+	channels->combined_count = adapter->num_active_queues;
+}
+
+/**
+ * ne6xvf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @channels: channel information structure
+ *
+ * Negotiate a new number of channels with the PF then do a reset. During
+ * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
+ * negative on failure.
+ **/
+static int ne6xvf_set_channels(struct net_device *netdev, struct ethtool_channels *channels)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+	int err = 0;
+
+	if (!channels->combined_count || channels->rx_count || channels->tx_count ||
+	    channels->combined_count > adapter->vf_res->num_queue_pairs)
+		return -EINVAL;
+
+	if (channels->combined_count == adapter->num_active_queues) {
+		/* nothing to do */
+		netdev_info(netdev, "channel count unchanged, nothing to do!\n");
+		return 0;
+	}
+
+	/* set for the next time the netdev is started */
+	if (!netif_running(adapter->netdev)) {
+		adapter->num_active_queues = channels->combined_count;
+
+		netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues);
+		netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues);
+
+		ne6xvf_fill_rss_lut(adapter);
+		adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS;
+
+		netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n");
+
+		return 0;
+	}
+
+	err = ne6xvf_close(adapter->netdev);
+	if (err) {
+		netdev_err(netdev, "fail to close vf\n");
+		return err;
+	}
+
+	adapter->num_active_queues = channels->combined_count;
+
+	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues);
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues);
+
+	ne6xvf_fill_rss_lut(adapter);
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS;
+
+	err = ne6xvf_open(adapter->netdev);
+	if (err) {
+		netdev_err(netdev, "fail to open vf\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops ne6xvf_ethtool_ops = {
+	.get_link_ksettings = ne6xvf_get_link_ksettings,
+	.set_link_ksettings = ne6xvf_set_link_ksettings,
+	.get_strings = ne6xvf_get_strings,
+	.get_sset_count = ne6xvf_get_sset_count,
+	.get_ethtool_stats = ne6xvf_get_ethtool_stats,
+	.get_drvinfo = ne6xvf_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_regs = ne6xvf_get_regs,
+	.get_regs_len = ne6xvf_get_regs_len,
+	.get_ringparam = ne6xvf_get_ringparam,
+	.set_ringparam = ne6xvf_set_ringparam,
+	.get_pauseparam = ne6xvf_get_pauseparam,
+	.get_coalesce = ne6xvf_get_coalesce,
+	.get_eeprom_len = ne6xvf_get_eeprom_len,
+	.get_rxnfc = ne6xvf_get_rxnfc,
+	.set_rxnfc = ne6xvf_set_rxnfc,
+	.get_rxfh_key_size = ne6xvf_get_rxfh_key_size,
+	.get_rxfh_indir_size = ne6xvf_get_rxfh_indir_size,
+	.get_rxfh = ne6xvf_get_rxfh,
+	.set_rxfh = ne6xvf_set_rxfh,
+	.get_channels = ne6xvf_get_channels,
+	.set_channels = ne6xvf_set_channels,
+	.set_phys_id = ne6xvf_set_phys_id,
+	.nway_reset = ne6xvf_nway_reset,
+	.self_test = ne6xvf_diag_test,
+};
+
+void ne6xvf_set_ethtool_ops(struct net_device *dev)
+{
+	dev->ethtool_ops = &ne6xvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h
new file mode 100644
index 00000000000000..300a90b6af55ef
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef _NE6XVF_ETHTOOL_H
+#define _NE6XVF_ETHTOOL_H
+
+#include "ne6xvf.h"
+
+#define NE6XVF_STAT(_type, _name, _stat) \
+{ \
+	.stat_string = _name, \
+	.sizeof_stat = sizeof_field(_type, _stat), \
+	.stat_offset = offsetof(_type, _stat) \
+}
+
+enum ne6xvf_ethtool_test_id {
+	NE6XVF_ETH_TEST_REG = 0,
+	NE6XVF_ETH_TEST_EEPROM,
+	NE6XVF_ETH_TEST_INTR,
+	NE6XVF_ETH_TEST_LINK,
+};
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c
new file mode 100644
index 00000000000000..bac945007836a2
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c
@@ -0,0 +1,3305 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#include
+#include
+#include
+
+#include "ne6xvf.h"
+#include "ne6xvf_osdep.h"
+#include "ne6xvf_virtchnl.h"
+#include "ne6xvf_txrx.h"
+#include "version.h"
+
+#define SUMMARY \
+	"Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function Linux Driver"
+#define COPYRIGHT "Copyright (c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd."
+
+char ne6xvf_driver_name[] = "ncevf";
+static const char ne6xvf_driver_string[] = SUMMARY;
+
+const char ne6xvf_driver_version[] = VERSION;
+static const char ne6xvf_copyright[] = COPYRIGHT;
+
+static const struct pci_device_id ne6xvf_pci_tbl[] = {
+	{PCI_VDEVICE(BZWX, 0x501a), 0},
+	{PCI_VDEVICE(BZWX, 0x601a), 0},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ne6xvf_pci_tbl);
+
+MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., ");
+MODULE_DESCRIPTION(SUMMARY);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
+
+static const struct net_device_ops ne6xvf_netdev_ops;
+struct workqueue_struct *ne6xvf_wq;
+static void ne6xvf_sync_features(struct net_device *netdev);
+
+static struct ne6xvf_adapter *ne6xvf_pdev_to_adapter(struct pci_dev *pdev)
+{
+	return netdev_priv(pci_get_drvdata(pdev));
+}
+
+void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter)
+{
+	adapter->flags |= NE6XVF_FLAG_RESET_NEEDED;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+static void ne6xvf_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->tx_timeout_count++;
+	ne6xvf_schedule_reset(adapter);
+}
+
+/**
+ * nce_get_vsi_stats_struct - Get System Network Statistics
+ * @adapter: board private structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+static struct net_device_stats *nce_get_vsi_stats_struct(struct ne6xvf_adapter *adapter)
+{
+	if (adapter->netdev)
+		return &adapter->netdev->stats;
+	else
+		return &adapter->net_stats;
+}
+
+/**
+ * ne6xvf_update_pf_stats - Update port stats counters
+ * @adapter: adapter whose stats need to be updated
+ */
+void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter)
+{
+	struct net_device_stats *ns; /* netdev stats */
+	struct ne6x_ring *tx_ring;
+	struct ne6x_ring *rx_ring;
+	u64 bytes, packets;
+	u64 rx_p, rx_b;
+	u64 tx_p, tx_b;
+	u16 i;
+
+	if (test_bit(NE6X_ADPT_DOWN, adapter->comm.state))
+		return;
+
+	ns = nce_get_vsi_stats_struct(adapter);
+
+	rx_p = 0;
+	rx_b = 0;
+	tx_p = 0;
+	tx_b = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		/* locate Tx ring */
+		tx_ring = &adapter->tx_rings[i];
+
+		packets = tx_ring->stats.packets;
+		bytes = tx_ring->stats.bytes;
+
+		tx_b += bytes;
+		tx_p += packets;
+
+		rx_ring = &adapter->rx_rings[i];
+
+		packets = rx_ring->stats.packets;
+		bytes = rx_ring->stats.bytes;
+		rx_b += bytes;
+		rx_p += packets;
+	}
+	rcu_read_unlock();
+
+	ns->rx_packets = rx_p;
+	ns->rx_bytes = rx_b;
+	ns->tx_packets = tx_p;
+	ns->tx_bytes = tx_b;
+
+	adapter->net_stats.rx_packets = rx_p;
+	adapter->net_stats.tx_packets = tx_p;
+	adapter->net_stats.rx_bytes = rx_b;
+	adapter->net_stats.tx_bytes = tx_b;
+}
+
+static bool ne6xvf_is_remove_in_progress(struct ne6xvf_adapter *adapter)
+{
+	return test_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section);
+}
+
+static void ne6xvf_sdk_task(struct work_struct *work)
+{
+	struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, sdk_task);
+	struct ne6xvf_hw *hw = &adapter->hw;
+	struct ne6xvf_arq_event_info event;
+	enum ne6xvf_status ret, v_ret;
+	enum virtchnl_ops v_op;
+	u16 pending = 1u;
+
+	if (ne6xvf_is_remove_in_progress(adapter))
+		return;
+
+	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED)
+		goto out;
+
+	event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+	if (!event.msg_buf)
+		goto out;
+
+	do
{ + ret = ne6xvf_clean_arq_element(hw, &event, &pending); + v_op = (enum virtchnl_ops)le32_to_cpu(event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(event.snap.state); + + if (ret || !v_op) + break; /* No event to process or error cleaning ARQ */ + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + ne6xvf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (pending != 0) + memset(event.msg_buf, 0, NE6XVF_MAX_AQ_BUF_SIZE); + } while (pending); + + if ((adapter->flags & (NE6XVF_FLAG_RESET_PENDING | NE6XVF_FLAG_RESET_NEEDED)) || + adapter->state == __NE6XVF_RESETTING) + goto freedom; + +freedom: + kfree(event.msg_buf); + +out: + return; +} + +static int ne6xvf_check_reset_complete(struct ne6xvf_hw *hw) +{ + u64 rstat; + int i; + + for (i = 0; i < NE6XVF_RESET_WAIT_COMPLETE_COUNT; i++) { + rstat = rd64(hw, NE6XVF_REG_ADDR(0, NE6X_VP_RELOAD)); + if (rstat) + return 0; + + usleep_range(10, 20); + } + + return 0; +} + +static int ne6xvf_init_sdk_mbx(struct ne6xvf_hw *hw) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + union u_ne6x_mbx_snap_buffer_data usnap; + u64 val; + + if (hw->mbx.init_flag) + return -1; + + hw->mbx.sq_data.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + hw->mbx.sq_data.type = VIRTCHNL_OP_UNKNOWN; + hw->mbx.init_flag = 0x1; + + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x2) { + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + mbx_buffer.snap.state = usnap.snap.state; + mbx_buffer.snap.type = usnap.snap.type; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + } + + usleep_range(10, 20); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + return 0; +} + +static void ne6xvf_startup(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ne6xvf_hw *hw = &adapter->hw; + int ret; + + WARN_ON(adapter->state != __NE6XVF_STARTUP); + + adapter->flags &= ~NE6XVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + + ret = ne6xvf_check_reset_complete(hw); + if (ret) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", ret); + goto err; + } + + ret = ne6xvf_init_sdk_mbx(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to init SDK (%d)\n", ret); + goto err; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_GET_RESOURCES); + + return; + +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. 
Need a reset to see if we can't + * get back to a working state. + */ + dev_err(&adapter->pdev->dev, "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, adapter->vsi_res->num_queue_pairs); + adapter->flags |= NE6XVF_FLAG_REINIT_MSIX_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + ne6xvf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + return 0; +} + +/** + * ne6xvf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __NE6XVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __NE6XVF_DOWN + * when fails the state is changed to __NE6XVF_INIT_FAILED + **/ +static void ne6xvf_init_get_resources(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_GET_RESOURCES); + + if (!adapter->vf_res) { + adapter->vf_res = kzalloc(struct_size(adapter->vf_res, vsi_res, 1), GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + + adapter->hw_feature = 0x00; + ret = ne6xvf_send_vf_config_msg(adapter, true); + if (ret) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", ret); + goto err; + } + + ret = ne6xvf_get_vf_config(adapter); + if (ret == NE6XVF_ERR_ADMIN_QUEUE_NO_WORK) { + ret = ne6xvf_send_vf_config_msg(adapter, true); + goto err_alloc; + } else if (ret == NE6XVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. + */ + dev_err(&pdev->dev, + "Unable to get VF config due to PF error condition, not retrying\n"); + return; + } + + if (ret) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", ret); + goto err_alloc; + } + + ret = ne6xvf_parse_vf_resource_msg(adapter); + if (ret) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", ret); + goto err_alloc; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_EXTENDED_CAPS); + return; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_disable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + napi_disable(&q_vector->napi); + } +} + +static void ne6xvf_free_queues(struct ne6xvf_adapter *adapter) +{ + if (!adapter->vsi_res) + return; + + adapter->num_active_queues = 0; + kfree(adapter->tg_rings); + adapter->tg_rings = NULL; + kfree(adapter->cq_rings); + adapter->cq_rings = NULL; + kfree(adapter->tx_rings); + adapter->tx_rings = NULL; + kfree(adapter->rx_rings); + adapter->rx_rings = NULL; +} + +/** + * ne6xvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
+ **/ +static int ne6xvf_alloc_queues(struct ne6xvf_adapter *adapter) +{ + int i, num_active_queues; + + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. Once basic reset is finished we'll confirm once we + * start negotiating config with PF. + */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else + num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + + adapter->tg_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + adapter->cq_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + + adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + + adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + + for (i = 0; i < num_active_queues; i++) { + struct ne6x_ring *tg_ring; + struct ne6x_ring *cq_ring; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + + tg_ring = &adapter->tg_rings[i]; + tg_ring->queue_index = i; + tg_ring->netdev = adapter->netdev; + tg_ring->dev = pci_dev_to_dev(adapter->pdev); + tg_ring->adpt = adapter; + tg_ring->count = adapter->tx_desc_count; + + cq_ring = &adapter->cq_rings[i]; + cq_ring->queue_index = i; + cq_ring->netdev = adapter->netdev; + cq_ring->dev = pci_dev_to_dev(adapter->pdev); + cq_ring->adpt = adapter; + cq_ring->count = adapter->cq_desc_count; + + tx_ring = &adapter->tx_rings[i]; + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->adpt = adapter; + tx_ring->count = adapter->tx_desc_count; + + rx_ring = &adapter->rx_rings[i]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->adpt = adapter; + rx_ring->count = adapter->rx_desc_count; + } + + adapter->max_queues = num_active_queues; + adapter->num_active_queues = adapter->max_queues; + + return 0; + +err_out: + ne6xvf_free_queues(adapter); + return -ENOMEM; +} + +static void ne6xvf_irq_disable(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + if (!adapter->msix_entries) + return; + + for (i = 0; i < adapter->num_msix_vectors; i++) { + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), 0xffffffffffffffff); + synchronize_irq(adapter->msix_entries[i].vector); + } +} + +static void ne6xvf_free_traffic_irqs(struct ne6xvf_adapter *adapter) +{ + int vector, irq_num, q_vectors; + + if (!adapter->msix_entries) + return; + + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } +} + +static void ne6xvf_free_q_vectors(struct ne6xvf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[q_idx]; + + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * ne6xvf_disable_vf - disable a VF that failed to reset + 
* @adapter: private adapter structure + * + * Helper function to shut down the VF when a reset never finishes. + **/ +static void ne6xvf_disable_vf(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *fv, *fvtmp; + struct ne6xvf_mac_filter *f, *ftmp; + + /* reset never finished */ + adapter->flags |= NE6XVF_FLAG_PF_COMMS_FAILED; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + ne6xvf_free_traffic_irqs(adapter); + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* Delete all of the filters */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_del(&fv->list); + kfree(fv); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + memset(adapter->vf_res, 0, struct_size(adapter->vf_res, vsi_res, 1)); + adapter->netdev->flags &= ~IFF_UP; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); +} + +/** + * ne6xvf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_acquire_msix_vectors(struct ne6xvf_adapter *adapter, int vectors) +{ + int v_actual; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1, vectors); + if (v_actual != vectors) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts: %d\n", v_actual); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + pci_disable_msi(adapter->pdev); + return v_actual; + } + + adapter->num_msix_vectors = v_actual; + + return 0; +} + +/** + * ne6xvf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. 
+ **/ +static int ne6xvf_set_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + int vector, v_budget; + int err = 0; + + if (!adapter->vsi_res) + return -EIO; + + v_budget = adapter->num_active_queues; + adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + dev_info(&adapter->pdev->dev, "v_budget:%d, adapter->vf_res->max_vectors: %d\n", v_budget, + adapter->vf_res->max_vectors); + err = ne6xvf_acquire_msix_vectors(adapter, v_budget); +out: + netif_set_real_num_rx_queues(adapter->netdev, v_budget); + netif_set_real_num_tx_queues(adapter->netdev, v_budget); + + return err; +} + +/** + * ne6xvf_fill_rss_lut - Fill the lut with default values + * @adapter: board private structure + **/ +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = i % adapter->num_active_queues; +} + +/** + * ne6xvf_init_rss - Prepare for RSS + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_init_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6x_rss_info *rss_info = &adapter->rss_info; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + ne6xvf_fill_rss_lut(adapter); + netdev_rss_key_fill((void *)&adapter->rss_info.hash_key[0], + adapter->rss_info.hash_key_size); + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + return 0; +} + +/** + * ne6xvf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int ne6xvf_alloc_q_vectors(struct ne6xvf_adapter *adapter) +{ + struct ne6x_q_vector *q_vector; + int q_idx, num_q_vectors; + + num_q_vectors = adapter->num_active_queues; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adpt = adapter; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + netif_napi_add(adapter->netdev, &q_vector->napi, ne6xvf_napi_poll); + } + + return 0; +} + +/** + * ne6xvf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +static int ne6xvf_init_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + int err; + + err = ne6xvf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + rtnl_lock(); + err = ne6xvf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ne6xvf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; + +err_alloc_q_vectors: + ne6xvf_reset_interrupt_capability(adapter); +err_set_interrupt: + ne6xvf_free_queues(adapter); +err_alloc_queues: + return err; +} + +/** + * ne6xvf_map_vector_to_cq - associate irqs with complete queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_cq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *cq_ring = &adapter->cq_rings[r_idx]; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; +} + +/** + * ne6xvf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_rxq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *rx_ring = &adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * ne6xvf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void ne6xvf_map_vector_to_txq(struct ne6xvf_adapter *adapter, int v_idx, int t_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = &adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->num_ringpairs++; +} + +/** + * ne6xvf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. 
Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static void ne6xvf_map_rings_to_vectors(struct ne6xvf_adapter *adapter) +{ + int rings_remaining = adapter->num_active_queues; + int q_vectors; + int ridx; + + q_vectors = adapter->num_msix_vectors; + + for (ridx = 0; ridx < rings_remaining; ridx++) { + ne6xvf_map_vector_to_cq(adapter, ridx, ridx); + ne6xvf_map_vector_to_rxq(adapter, ridx, ridx); + ne6xvf_map_vector_to_txq(adapter, ridx, ridx); + } +} + +/** + * ne6xvf_setup_all_tg_resources - allocate all queues Tg resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tg_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tg_descriptors(&adapter->tg_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "tg Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_cq_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->cq_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_cq_descriptors(&adapter->cq_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tx_descriptors(&adapter->tx_rings[i]); + err |= ne6x_setup_tx_sgl(&adapter->tx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
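+ *
+ * Each Rx ring is sized to adapter->rx_desc_count before its descriptor
+ * memory is allocated.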
+ * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = ne6x_setup_rx_descriptors(&adapter->rx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t ne6xvf_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + u64 val; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + val = rd64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK)); + val |= 1ULL << NE6X_VP_CQ_INTSHIFT; + wr64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), val); + + return IRQ_HANDLED; +} + +/** + * ne6xvf_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void ne6xvf_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector; + + q_vector = container_of(notify, struct ne6x_q_vector, affinity_notify); + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * ne6xvf_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void ne6xvf_irq_affinity_release(struct kref *ref) {} + +/** + * ne6xvf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * @basename: device basename + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. 
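+ *
+ * Each vector's IRQ name encodes the netdev basename and queue index, an
+ * affinity-change notifier is registered, and an affinity hint is spread
+ * across CPUs with cpumask_local_spread().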
+ **/ +static int ne6xvf_request_traffic_irqs(struct ne6xvf_adapter *adapter, char *basename) +{ + unsigned int rx_int_idx = 0, tx_int_idx = 0; + unsigned int vector, q_vectors; + int irq_num, err; + int cpu; + + ne6xvf_irq_disable(adapter); + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[vector]; + + irq_num = adapter->msix_entries[vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), "ne6xvf-%s-TxRx-%u", + basename, rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "ne6xvf-%s-rx-%u", basename, + rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "ne6xvf-%s-tx-%u", basename, + tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(irq_num, ne6xvf_msix_clean_rings, 0, q_vector->name, q_vector); + if (err) { + dev_info(&adapter->pdev->dev, "Request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = ne6xvf_irq_affinity_notify; + q_vector->affinity_notify.release = ne6xvf_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. + */ + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } + + return err; +} + +/** + * ne6xvf_configure_queues + * @adapter: adapter structure + * + * Request that the PF set up our (previously allocated) queues. + **/ +static void ne6xvf_configure_queues(struct ne6xvf_adapter *adapter) +{ + unsigned int rx_buf_len = NE6X_RXBUFFER_2048; + struct ne6xvf_hw *hw = &adapter->hw; + union ne6x_sq_base_addr sq_base_addr; + union ne6x_rq_base_addr rq_base_addr; + union ne6x_rq_block_cfg rq_block_cfg; + union ne6x_cq_base_addr cq_base_addr; + union ne6x_cq_cfg cq_cfg; + union ne6x_sq_cfg sq_cfg; + union ne6x_rq_cfg rc_cfg; + int i; + + /* Legacy Rx will always default to a 2048 buffer size. 
*/ +#if (PAGE_SIZE < 8192) + if (!(adapter->flags & NE6XVF_FLAG_LEGACY_RX)) + /* For jumbo frames on systems with 4K pages we have to use + * an order 1 page, so we might as well increase the size + * of our Rx buffer to make better use of the available space + */ + rx_buf_len = NE6X_RXBUFFER_4096; +#endif + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + for (i = 0; i < adapter->num_active_queues; i++) { + /* cq */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->cq_rings[i].tail = (u64 __iomem *)(hw->hw_addr0 + NE6XVF_QC_TAIL1(i)); + adapter->cq_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + cq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = adapter->cq_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = adapter->cq_rings[i].count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG), cq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER), 0x0); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER), 0x0); + + /* tx */ + /* cache tail off for easier writes later */ + adapter->tx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QTX_TAIL1(i)); + adapter->tx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + sq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = adapter->tx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = adapter->tx_rings[i].count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG), sq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER), 0x0); + + /* rx */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->rx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QRX_TAIL1(i)); + adapter->rx_rings[i].rx_buf_len = rx_buf_len; + adapter->rx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + rq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = adapter->rx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rq_block_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = adapter->rx_rings[i].rx_buf_len; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = adapter->rx_rings[i].count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG), rc_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER), 0x0); + } + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x0); + + usleep_range(100, 
120); +} + +/** + * ne6xvf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void ne6xvf_configure(struct ne6xvf_adapter *adapter) +{ + int i; + + ne6xvf_configure_queues(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct ne6x_ring *ring = &adapter->rx_rings[i]; + + ne6x_alloc_rx_buffers(ring, NE6X_DESC_UNUSED(ring)); + usleep_range(1000, 2000); + } +} + +/** + * ne6xvf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_enable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + + q_vector = &adapter->q_vectors[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * ne6xvf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +static void ne6xvf_up_complete(struct ne6xvf_adapter *adapter) +{ + ne6xvf_change_state(adapter, __NE6XVF_RUNNING); + clear_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_napi_enable_all(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_ENABLE_QUEUES; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_reinit_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + + err = ne6xvf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_map_rings_to_vectors(adapter); +err: + return err; +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter); + +/** + * ne6xvf_handle_reset - Handle hardware reset + * @adapter: pointer to ne6xvf_adapter + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + * + * This function is called in the __NE6XVF_RESETTING driver state. If a reset + * is detected and completes, the driver state changed to __NE6XVF_RUNNING or + * __NE6XVF_DOWN, else driver state will remain in __NE6XVF_RESETTING. + * + * The function is called with the NE6XVF_FLAG_RESET_PENDING flag set and it is + * cleared when a reset is detected and completes. + **/ +static void ne6xvf_handle_reset(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_hw *hw = &adapter->hw; + bool running; + int err, i; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
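+ * Instead, the driver state recorded before the reset (last_state) is
+ * used to decide whether the data path has to be rebuilt.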
+ */ + running = (adapter->last_state == __NE6XVF_RUNNING); + + if (running) { + netdev->flags &= ~IFF_UP; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + adapter->link_up = false; + ne6xvf_napi_disable_all(adapter); + } + + pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); + + ne6xvf_irq_disable(adapter); + + for (i = 0; i < adapter->num_msix_vectors; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + + /* Set the queues_disabled flag when VF is going through reset + * to avoid a race condition especially for ADQ i.e. when a VF ADQ is + * configured, PF resets the VF to allocate ADQ resources. When this + * happens there's a possibility to hit a condition where VF is in + * running state but the queues haven't been enabled yet. So wait for + * virtchnl success message for enable queues and then unset this flag. + * Don't allow the link to come back up until that happens. + */ + adapter->flags |= NE6XVF_FLAG_QUEUES_DISABLED; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required = 0; + + err = ne6xvf_reinit_interrupt_scheme(adapter); + if (err) + goto reset_err; + + adapter->aq_required |= NE6XVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= NE6XVF_FLAG_AQ_MAP_VECTORS; + + /* We were running when the reset started, so we need + * to restore some state here. + */ + if (running) { + err = ne6xvf_setup_all_tg_resources(adapter); + if (err) + goto reset_err; + + err = ne6xvf_setup_all_cq_resources(adapter); + if (err) + goto reset_err; + + /* allocate transmit descriptors */ + err = ne6xvf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = ne6xvf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + if ((adapter->flags & NE6XVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & NE6XVF_FLAG_REINIT_ITR_NEEDED)) { + err = ne6xvf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto reset_err; + + adapter->flags &= ~NE6XVF_FLAG_REINIT_MSIX_NEEDED; + } + + ne6xvf_configure(adapter); + + /* ne6xvf_up_complete() will switch device back + * to __NE6XVF_RUNNING + */ + ne6xvf_up_complete(adapter); + + ne6xvf_irq_enable(adapter, true); + + ne6xvf_get_port_link_status(adapter); + + netdev->flags |= IFF_UP; + } else { + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + } + + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + return; + +reset_err: + if (running) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_free_traffic_irqs(adapter); + netdev->flags &= ~IFF_UP; + } + + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + ne6xvf_disable_vf(adapter); +} + +/** + * ne6xvf_init_process_extended_caps - Part of driver startup + * @adapter: board private structure + * + * Function processes __NE6XVF_INIT_EXTENDED_CAPS driver state. This state + * handles negotiating capabilities for features which require an additional + * message. + * + * Once all extended capabilities exchanges are finished, the driver will + * transition into __NE6XVF_INIT_CONFIG_ADAPTER. 
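+ *
+ * No extended capability messages are exchanged at present; the handler
+ * copies the negotiated queue-pair count, clears hw_feature and moves
+ * straight to the next state.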
+ */ +static void ne6xvf_init_process_extended_caps(struct ne6xvf_adapter *adapter) +{ + WARN_ON(adapter->state != __NE6XVF_INIT_EXTENDED_CAPS); + + /* When we reach here, no further extended capabilities exchanges are + * necessary, so we finally transition into __NE6XVF_INIT_CONFIG_ADAPTER + */ + adapter->vsi_res->num_queue_pairs = adapter->vf_res->num_queue_pairs; + adapter->hw_feature = 0x00; + ne6xvf_change_state(adapter, __NE6XVF_INIT_CONFIG_ADAPTER); +} + +/** + * ne6xvf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct. + **/ +static int ne6xvf_process_config(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + netdev_features_t csumo_features; + netdev_features_t vlano_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + + /* advertise support but don't enable by default since only one type of + * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one + * type turns on the other has to be turned off. This is enforced by the + * nce_fix_features() ndo callback. + */ + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; + + netdev->gso_max_size = 65535; + netdev->features = netdev->hw_features; + ne6xvf_sync_features(netdev); + + return 0; +} + +/** + * ne6xvf_init_config_adapter - last part of driver startup + * @adapter: board private structure + * + * After all the supported capabilities are negotiated, then the + * __NE6XVF_INIT_CONFIG_ADAPTER state will finish driver initialization. 
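+ *
+ * This step sets up the netdev ops and default descriptor counts,
+ * initialises the interrupt scheme and RSS, registers the netdev if
+ * needed, and leaves the adapter in the __NE6XVF_DOWN state.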
+ */ +static void ne6xvf_init_config_adapter(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_CONFIG_ADAPTER); + + if (ne6xvf_process_config(adapter)) + goto err; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= NE6XVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &ne6xvf_netdev_ops; + ne6xvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - ETH_HLEN - ETH_FCS_LEN; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->rx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->cq_desc_count = adapter->tx_desc_count + adapter->rx_desc_count; + ret = ne6xvf_init_interrupt_scheme(adapter); + if (ret) + goto err_sw_init; + + ne6xvf_map_rings_to_vectors(adapter); + + netif_carrier_off(netdev); + adapter->link_up = false; + if (!adapter->netdev_registered) { + ret = ne6xvf_register_netdev(adapter); + if (ret) + goto err_register; + } + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + wake_up(&adapter->down_waitqueue); + ne6xvf_init_rss(adapter); + adapter->trusted = 0; + return; + +err_register: +err_sw_init: + ne6xvf_reset_interrupt_capability(adapter); +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_process_aq_command - process aq_required flags + * and sends aq command + * @adapter: pointer to ne6xvf adapter structure + * + * Returns 0 on success + * Returns error code if no command was sent + * or error code if the command failed. 
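+ *
+ * At most one pending request is issued per call; the aq_required flags
+ * are checked in a fixed order and -EAGAIN is returned when nothing is
+ * pending.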
+ **/ +static int ne6xvf_process_aq_command(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_CONFIG) + return ne6xvf_send_vf_config_msg(adapter, false); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD) + return ne6xvf_send_vf_offload_msg(adapter); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_RSS) { + ne6xvf_config_rss_info(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CHANGED_RSS) { + ne6xvf_changed_rss(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_QUEUES) { + if (ne6xvf_request_queues(adapter, adapter->num_active_queues) == 0) { + usleep_range(50, 100); + if (ne6xvf_poll_virtchnl_msg(adapter, &event, + VIRTCHNL_OP_REQUEST_QUEUES) == 0) { + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + } + return 0; + } + if (adapter->aq_required & NE6XVF_FLAG_AQ_ENABLE_QUEUES) { + ne6xvf_enable_queues(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS) { + ne6xvf_vchanel_get_port_link_status(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_SET_VF_MAC) { + ne6xvf_set_vf_addr(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_MAC_FILTER) { + ne6xvf_add_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_MAC_FILTER) { + ne6xvf_del_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_VLAN_FILTER) { + ne6xvf_add_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_VLAN_FILTER) { + ne6xvf_del_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_REQUEST_PROMISC) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_REQUEST_PROMISC; + ne6xvf_set_promiscuous(adapter); + + return 0; + } + return -EAGAIN; +} + +/** + * ne6xvf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
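+ *
+ * The current implementation reports completion unconditionally.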
+ **/ +static bool ne6xvf_asq_done(struct ne6xvf_hw *hw) +{ + return 1; +} + +/** + * ne6xvf_register_netdev - register netdev + * @adapter: pointer to the ne6xvf_adapter struct + * + * Returns 0 if register netdev success + **/ +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter) +{ + char newname[IFNAMSIZ] = {0}; + int ret; + u16 domain_num; + + domain_num = pci_domain_nr(adapter->pdev->bus); + + /* There are some pcie device with the same bus number but with different + * pcie domain, the name of netdev should contain pcie domain number + */ + if (domain_num) + sprintf(newname, "enP%dp%ds0f%dv%d", domain_num, adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + else + sprintf(newname, "enp%ds0f%dv%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + + strcpy(&adapter->netdev->name[0], newname); + dev_info(&adapter->pdev->dev, "name: %s\n", newname); + ret = register_netdev(adapter->netdev); + if (ret) { + sprintf(newname, "enp%ds0f%dv%%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport); + strcpy(&adapter->netdev->name[0], newname); + ret = register_netdev(adapter->netdev); + } + return ret; +} + +static void ne6xvf_watchdog_task(struct work_struct *work) +{ + struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, + watchdog_task.work); + struct ne6xvf_hw *hw = &adapter->hw; + + if (ne6xvf_is_remove_in_progress(adapter)) + return; + + if (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + ne6xvf_change_state(adapter, __NE6XVF_COMM_FAILED); + + if (adapter->flags & NE6XVF_FLAG_RESET_NEEDED && adapter->state != __NE6XVF_RESETTING) { + adapter->flags &= ~NE6XVF_FLAG_RESET_NEEDED; + ne6xvf_change_state(adapter, __NE6XVF_RESETTING); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + } + switch (adapter->state) { + case __NE6XVF_INIT_FAILED: + /* Try again from failed step */ + ne6xvf_change_state(adapter, adapter->last_state); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, HZ); + return; + case __NE6XVF_COMM_FAILED: + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); + return; + case __NE6XVF_RESETTING: + ne6xvf_handle_reset(adapter); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return; + case __NE6XVF_DOWN: + case __NE6XVF_DOWN_PENDING: + case __NE6XVF_TESTING: + case __NE6XVF_RUNNING: + if (adapter->current_op) { + if (!ne6xvf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); + ne6xvf_send_api_ver(adapter); + } + } else { + int ret = ne6xvf_process_aq_command(adapter); + + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + * if the error isn't -ENOTSUPP + */ + if (ret && ret != -EOPNOTSUPP && adapter->state == __NE6XVF_RUNNING) + ne6xvf_request_stats(adapter); + } + break; + case __NE6XVF_REMOVE: + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; + default: + break; + } + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + +restart_watchdog: + queue_work(ne6xvf_wq, 
&adapter->sdk_task); + if (adapter->aq_required) + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); + else + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(1000)); +} + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} + +/** + * ne6xvf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. + **/ +static struct ne6xvf_mac_filter *ne6xvf_find_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_mac_filter *ne6xvf_add_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + f = ne6xvf_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + + list_add_tail(&f->list, &adapter->mac_filter_list); + f->add = true; + f->add_handled = false; + f->is_new_mac = true; + f->is_primary = false; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + } else { + f->remove = false; + } + + return f; +} + +/** + * ne6xvf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +static void ne6xvf_down(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf; + struct ne6xvf_mac_filter *f; + + if (adapter->state <= __NE6XVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* clear the sync flag on all filters */ + __dev_uc_unsync(adapter->netdev, NULL); + __dev_mc_unsync(adapter->netdev, NULL); + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) + f->remove = true; + + /* remove all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) + vlf->remove = true; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!(adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __NE6XVF_RESETTING) { + dev_info(&adapter->pdev->dev, "%s: state->%s\n", __func__, + ne6xvf_state_str(adapter->state)); + /* cancel any current operation */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* Schedule operations to close down the HW. Don't wait + * here for this to complete. The watchdog is still running + * and it will take care of this. 
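+ *
+ * Only the MAC filter removal is requested at this point; the watchdog
+ * is rescheduled immediately once the flags below are updated.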
+ */ + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + + /* In case the queue configure or enable operations are still + * pending from when the interface was opened, make sure + * they're canceled here. + */ + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter) +{ + adapter->aq_required |= NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_set_vport_state(struct ne6xvf_adapter *adapter, int tx_state, int rx_state) +{ + if (rx_state) + adapter->hw_feature &= ~NE6X_F_RX_DISABLE; + else + adapter->hw_feature |= NE6X_F_RX_DISABLE; + + if (tx_state) + adapter->hw_feature &= ~NE6X_F_TX_DISABLE; + else + adapter->hw_feature |= NE6X_F_TX_DISABLE; + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog is started, + * and the stack is notified that the interface is ready. + **/ +int ne6xvf_open(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) { + dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); + err = -EIO; + goto unlock; + } + + if (adapter->state == __NE6XVF_RUNNING && !test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); + err = 0; + goto unlock; + } + + if (adapter->state != __NE6XVF_DOWN) { + err = -EBUSY; + goto unlock; + } + err = ne6xvf_setup_all_tg_resources(adapter); + if (err) + goto err_setup_tg; + + err = ne6xvf_setup_all_cq_resources(adapter); + if (err) + goto err_setup_cq; + + /* allocate transmit descriptors */ + err = ne6xvf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ne6xvf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* clear any pending interrupts, may auto mask */ + err = ne6xvf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto err_req_irq; + + ne6xvf_configure(adapter); + + ne6xvf_up_complete(adapter); + + ne6xvf_irq_enable(adapter, true); + + ne6xvf_get_port_link_status(adapter); + + ne6xvf_set_vport_state(adapter, true, true); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + return 0; + +err_req_irq: + ne6xvf_down(adapter); + ne6xvf_free_traffic_irqs(adapter); +err_setup_rx: + ne6xvf_free_all_rx_resources(adapter); +err_setup_tx: + ne6xvf_free_all_tx_resources(adapter); +err_setup_cq: + ne6xvf_free_all_cq_resources(adapter); +err_setup_tg: + ne6xvf_free_all_tg_resources(adapter); + +unlock: + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + return err; +} + +/** + * ne6xvf_close - Disables a network interface + * @netdev: 
network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) + * are freed, along with all transmit and receive resources. + **/ +int ne6xvf_close(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6xvf_hw *hw = &adapter->hw; + int status; + int i; + + netdev_info(netdev, "close !!!\n"); + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->state <= __NE6XVF_DOWN_PENDING) { + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + return 0; + } + + ne6xvf_set_vport_state(adapter, false, false); + ne6xvf_down(adapter); + + for (i = 0; i < adapter->num_msix_vectors; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + ne6xvf_change_state(adapter, __NE6XVF_DOWN_PENDING); + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + /* If we're closing the interface as part of driver removal then don't + * wait. The VF resources will be reinitialized when the hardware is + * reset. + */ + if (ne6xvf_is_remove_in_progress(adapter)) + return 0; + + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * ne6xvf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. + * + * Also, we wait for state to transition to __NE6XVF_DOWN before + * returning. State change occurs in ne6xvf_virtchnl_completion() after + * VF resources are released (which occurs after PF driver processes and + * responds to admin queue commands). + */ + status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __NE6XVF_DOWN, + msecs_to_jiffies(500)); + if (!status) + netdev_dbg(netdev, "Device resources not yet released\n"); + + return 0; +} + +/** + * ne6xvf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int ne6xvf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (ne6xvf_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} + +/** + * ne6xvf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int ne6xvf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6xvf_mac_filter *f; + + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. 
Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = ne6xvf_find_filter(adapter, addr); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + } + + return 0; +} + +/** + * ne6xvf_promiscuous_mode_changed - check if promiscuous mode bits changed + * @adapter: device specific adapter + */ +static bool ne6xvf_promiscuous_mode_changed(struct ne6xvf_adapter *adapter) +{ + return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + +/** + * ne6xvf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void ne6xvf_set_rx_mode(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + __dev_uc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + __dev_mc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + return; + } + + if (netdev->flags & IFF_PROMISC) { + adapter->flags |= NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else if (netdev->flags & IFF_ALLMULTI) { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_REQUEST_PROMISC; +} + +/** + * ne6xvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog task. 
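+ *
+ * The netdev's embedded stats structure is returned when available;
+ * otherwise the adapter's private net_stats copy is used.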
+ **/ +static struct net_device_stats *ne6xvf_get_stats(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->netdev) + return &adapter->netdev->stats; + else + return &adapter->net_stats; +} + +static void ne6xvf_sync_features(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG; + else + adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + adapter->hw_feature |= NE6X_F_TX_VLAN; + else + adapter->hw_feature &= ~NE6X_F_TX_VLAN; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) + adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_TX) + adapter->hw_feature |= NE6X_F_TX_QINQ; + else + adapter->hw_feature &= ~NE6X_F_TX_QINQ; + + if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER; + + if (netdev->features & NETIF_F_RXCSUM) + adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM; + + if (netdev->features & NETIF_F_LRO) + adapter->hw_feature |= NE6X_OFFLOAD_LRO; + + if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->hw_feature |= NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_IP_CSUM) + adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + adapter->hw_feature |= NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_SCTP_CRC) + adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM; + + dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__, + adapter->hw_feature); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM) + +/** + * nce_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static int ne6xvf_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG; + else + adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. 
Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + adapter->hw_feature |= NE6X_F_TX_VLAN; + else + adapter->hw_feature &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + adapter->hw_feature |= NE6X_F_TX_QINQ; + else + adapter->hw_feature &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + adapter->hw_feature |= NE6X_OFFLOAD_LRO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->hw_feature |= NE6X_OFFLOAD_TSO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + adapter->hw_feature |= NE6X_OFFLOAD_UFO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + adapter->hw_feature |= NE6X_OFFLOAD_L2; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__, + adapter->hw_feature); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + * nce_fix_features - fix the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static netdev_features_t ne6xvf_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +/** + * ne6xvf_replace_primary_mac - Replace current primary address + * @adapter: board private structure + * @new_mac: new MAC address to be applied + * + * Replace current dev_addr and send request to PF for removal of previous + 
* primary MAC address filter and addition of new primary MAC filter. + * Return 0 for success, -ENOMEM for failure. + * + * Do not call this with mac_vlan_list_lock! + **/ +static int ne6xvf_replace_primary_mac(struct ne6xvf_adapter *adapter, const u8 *new_mac) +{ + memcpy(adapter->hw.mac.addr, new_mac, 6); + adapter->aq_required |= NE6XVF_FLAG_AQ_SET_VF_MAC; + + /* schedule the watchdog task to immediately process the request */ + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return 0; +} + +/** + * ne6xvf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_set_mac(struct net_device *netdev, void *p) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + int ret; + + netdev_info(netdev, "set mac address %pM\n", addr->sa_data); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (is_multicast_ether_addr(addr->sa_data)) { + netdev_err(netdev, "Invalid Ethernet address %pM\n", addr->sa_data); + return -EINVAL; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + ret = ne6xvf_replace_primary_mac(adapter, addr->sa_data); + + if (ret) + return ret; + + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + ether_addr_equal(netdev->dev_addr, addr->sa_data), + msecs_to_jiffies(2500)); + + /* If ret < 0 then it means wait was interrupted. + * If ret == 0 then it means we got a timeout. + * else it means we got response for set MAC from PF, + * check if netdev MAC was updated to requested MAC, + * if yes then set MAC succeeded otherwise it failed return -EACCES + */ + netdev_info(netdev, "%s,%pM %pM\n", __func__, addr->sa_data, netdev->dev_addr); + if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return -EACCES; + + return 0; +} + +/** + * ne6xvf_do_ioctl - Handle network device specific ioctls + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + * + * Callback to handle the networking device specific ioctls. Used to handle + * the SIOCGHWTSTAMP and SIOCSHWTSTAMP ioctl requests that configure Tx and Rx + * timstamping support. + */ +static int ne6xvf_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +/** + * ne6xvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu; + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + return 0; +} + +/** + * ne6xvf_find_vlan - Search filter list for specific vlan filter + * @vsi: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. 
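+ *
+ * A filter matches only when both the VLAN ID and the TPID are equal.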
+ **/ +static struct ne6xvf_vlan_filter *ne6xvf_find_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_vlan_filter *ne6xvf_add_vlan_list(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void ne6xvf_del_vlan_list(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_vlan_filter *ne6xvf_add_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void ne6xvf_del_vlan(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +static int ne6xvf_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + if (!vid) + return 0; + + if (!ne6xvf_add_vlan(adapter, vlan)) + return -ENOMEM; + + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +static int ne6xvf_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + 
ne6xvf_del_vlan(adapter, vlan); + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + *__ne6xvf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type_data: tc offload data + * + * This function processes the config information provided by the + * user to configure traffic classes/queue channels and packages the + * information to request the PF to setup traffic classes. + * + * Returns 0 on success. + **/ +static int __ne6xvf_setup_tc(struct net_device *netdev, void *type_data) +{ + return 0; +} + +/** + * ne6xvf_setup_tc - configure multiple traffic classes + * @dev: network interface device structure + * @type: type of offload + * @type_data: tc offload data + * + * This function is the callback to ndo_setup_tc in the + * netdev_ops. + * + * Returns 0 on success + **/ +static int ne6xvf_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + return __ne6xvf_setup_tc(dev, type_data); +} + +/** + * ne6xvf_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t ne6xvf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * ne6xvf_fwd_add_macvlan - Configure MACVLAN interface + * @netdev: Main net device to configure + * @vdev: MACVLAN subordinate device + */ +static void *ne6xvf_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(&adapter->pdev->dev, sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_sync(netdev, mac); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adapter->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +/** + * ne6xvf_fwd_del_macvlan - Delete MACVLAN interface resources + * @netdev: Main net device + * @accel_priv: MACVLAN sub ordinate device + */ +static void ne6xvf_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6xvf_adapter *adapter = 
netdev_priv(netdev); + + if (!accel_priv) + return; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_unsync(netdev, mv->mac); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + list_del(&mv->list); + devm_kfree(&adapter->pdev->dev, mv); + + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); +} + +static const struct net_device_ops ne6xvf_netdev_ops = { + .ndo_open = ne6xvf_open, + .ndo_stop = ne6xvf_close, + .ndo_start_xmit = ne6xvf_lan_xmit_frame, + .ndo_get_stats = ne6xvf_get_stats, + .ndo_set_rx_mode = ne6xvf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ne6xvf_set_mac, + .ndo_do_ioctl = ne6xvf_do_ioctl, + .ndo_change_mtu = ne6xvf_change_mtu, + .ndo_tx_timeout = ne6xvf_tx_timeout, + + .ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid, + + .ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid, + + .ndo_setup_tc = ne6xvf_setup_tc, + .ndo_features_check = ne6xvf_features_check, + + .ndo_dfwd_add_station = ne6xvf_fwd_add_macvlan, + .ndo_dfwd_del_station = ne6xvf_fwd_del_macvlan, + + .ndo_fix_features = ne6xvf_fix_features, + .ndo_set_features = ne6xvf_set_features, +}; + +static int ne6xvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6xvf_adapter *adapter = NULL; + struct ne6xvf_hw *hw = NULL; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_regions(pdev, ne6xvf_driver_name); + if (err) { + dev_err(pci_dev_to_dev(pdev), "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + sprintf(name, "enp%ds%df%d", pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + netdev = alloc_netdev_mq(sizeof(struct ne6xvf_adapter), name, NET_NAME_USER, ether_setup, + NE6XVF_MAX_REQ_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + + hw = &adapter->hw; + hw->back = adapter; + + ne6xvf_change_state(adapter, __NE6XVF_STARTUP); + + pci_save_state(pdev); + + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); + + if (!hw->hw_addr0 || !hw->hw_addr2) { + err = -EIO; + goto err_ioremap; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; + + ne6xvf_init_spinlock(&hw->mbx.mbx_spinlock); + spin_lock_init(&adapter->mac_vlan_list_lock); + + INIT_LIST_HEAD(&adapter->mac_filter_list); + INIT_LIST_HEAD(&adapter->vlan_filter_list); + INIT_LIST_HEAD(&adapter->macvlan_list); + + INIT_WORK(&adapter->sdk_task, ne6xvf_sdk_task); + INIT_DELAYED_WORK(&adapter->watchdog_task, ne6xvf_watchdog_task); + + init_waitqueue_head(&adapter->down_waitqueue); + 
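+	/* vc_waitqueue is woken from ne6xvf_virtchnl_completion() when the PF
+	 * answers a VIRTCHNL_OP_SET_VF_ADDR request; ne6xvf_set_mac() sleeps on
+	 * it until the new MAC address has taken effect.
+	 */
+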
init_waitqueue_head(&adapter->vc_waitqueue); + + ne6xvf_startup(adapter); + ne6xvf_init_get_resources(adapter); + adapter->aq_required = 0; + ne6xvf_init_process_extended_caps(adapter); + ne6xvf_init_config_adapter(adapter); + + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); +#ifdef CONFIG_DEBUG_FS + ne6xvf_dbg_pf_init(adapter); +#endif + hw->debug_mask = 0xffffffff; + return 0; +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ne6xvf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +static void ne6xvf_irq_enable_queues(struct ne6xvf_adapter *adapter, u32 mask) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_msix_vectors; i++) + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), ~(1ULL << NE6X_VP_CQ_INTSHIFT)); +} + +/** + * ne6xvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + * @flush: boolean value whether to run rd32() + **/ +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush) +{ + ne6xvf_irq_enable_queues(adapter, ~0); +} + +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->tg_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tg_rings[i].desc) { + struct ne6x_ring *tg_ring = &adapter->tg_rings[i]; + /* Zero out the descriptor ring */ + memset(tg_ring->desc, 0, tg_ring->size); + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + if (!tg_ring->netdev) + return; + + dma_free_coherent(tg_ring->dev, tg_ring->size, tg_ring->desc, tg_ring->dma); + tg_ring->desc = NULL; + } +} + +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->cq_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->cq_rings[i].desc) { + struct ne6x_ring *cq_ring = &adapter->cq_rings[i]; + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + if (!cq_ring->netdev) + return; + + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + if (!adapter->tx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].desc) { + struct ne6x_ring *tx_ring = &adapter->tx_rings[i]; + + /* ring already cleared, nothing to do */ + if (tx_ring->tx_buf) { + /* Free all the Tx ring sk_buffs */ + for (idx = 0; idx < tx_ring->count; idx++) + ne6xvf_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_buf[idx]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (tx_ring->netdev) + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + } + + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + kfree(tx_ring->sgl); + } +} + +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + 
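+	/* Unmap and release the page buffers of every active Rx ring, then free
+	 * the descriptor memory; called from ne6xvf_remove() and from the
+	 * VIRTCHNL_OP_DISABLE_QUEUES completion handler.
+	 */
+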
unsigned long bi_size; + int i, idx; + + if (!adapter->rx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->rx_rings[i].desc) { + struct ne6x_ring *rx_ring = &adapter->rx_rings[i]; + /* ring already cleared, nothing to do */ + if (rx_ring->rx_buf) { + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (idx = 0; idx < rx_ring->count; idx++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[idx]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + } + + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + } +} + +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ne6xvf_remove(struct pci_dev *pdev) +{ + struct ne6xvf_adapter *adapter = ne6xvf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf, *vlftmp; + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_mac_filter *f, *ftmp; + struct ne6x_macvlan *mv, *mv_tmp; + int i; + +#ifdef CONFIG_DEBUG_FS + ne6xvf_dbg_pf_exit(adapter); +#endif + + set_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); + cancel_work_sync(&adapter->sdk_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + + if (adapter->netdev_registered) { + /* This will call ne6xvf_close if the device was open previously. + * The Admin Queue and watchdog tasks have already been shut + * down at this point so the driver will rely on + * ne6xvf_request_reset below to disable the queues and handle + * any other Admin Queue-based cleanup normally done as part of + * ne6xvf_close. 
+ */ + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + dev_info(&adapter->pdev->dev, "Removing device\n"); + + /* Shut down all the garbage mashers on the detention level */ + ne6xvf_change_state(adapter, __NE6XVF_REMOVE); + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + ne6xvf_request_reset(adapter); + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + + if (adapter->last_state == __NE6XVF_RESETTING || + (adapter->last_state == __NE6XVF_RUNNING && !(netdev->flags & IFF_UP))) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + + ne6xvf_destroy_spinlock(&hw->mbx.mbx_spinlock); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + /* release vsi vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adapter->macvlan_list, list) + ne6xvf_fwd_del_macvlan(netdev, mv); + + iounmap(hw->hw_addr0); + iounmap(hw->hw_addr2); + pci_release_regions(pdev); + + ne6xvf_free_queues(adapter); + kfree(adapter->vf_res); + adapter->vf_res = NULL; + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static struct pci_driver ne6xvf_driver = { + .name = ne6xvf_driver_name, + .id_table = ne6xvf_pci_tbl, + .probe = ne6xvf_probe, + .remove = ne6xvf_remove, +}; + +static int __init ne6xvf_init_module(void) +{ + int ret; + + pr_info("navf: %s - version %s\n", ne6xvf_driver_string, ne6xvf_driver_version); + + pr_info("%s\n", ne6xvf_copyright); + + ne6xvf_wq = create_singlethread_workqueue(ne6xvf_driver_name); + if (!ne6xvf_wq) { + pr_err("%s: Failed to create workqueue\n", ne6xvf_driver_name); + return -ENOMEM; + } + +#ifdef CONFIG_DEBUG_FS + ne6xvf_dbg_init(); +#endif + ret = pci_register_driver(&ne6xvf_driver); + + return ret; +} + +module_init(ne6xvf_init_module); + +/** + * ne6xvf_exit_module - Driver Exit Cleanup Routine + * + * ne6xvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ne6xvf_exit_module(void) +{ + pci_unregister_driver(&ne6xvf_driver); + destroy_workqueue(ne6xvf_wq); +#ifdef CONFIG_DEBUG_FS + ne6xvf_dbg_exit(); +#endif +} + +module_exit(ne6xvf_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h new file mode 100644 index 00000000000000..600dd9f773663a --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6XVF_OSDEP_H +#define _NE6XVF_OSDEP_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp); + +#endif /* _NE6XVF_OSDEP_H */ + diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c new file mode 100644 index 00000000000000..7ba4a802d5b7d7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_txrx.h" + +/** + * ne6xvf_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void ne6xvf_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + struct ne6xvf_hw *hw = &adpt->hw; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT), + (1ULL << NE6X_VP_CQ_INTSHIFT)); + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), + ~(1ULL << NE6X_VP_CQ_INTSHIFT)); + } +} + +/** + * ne6xvf_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +/** + * ne6xvf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
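+ * The CQ, Tx and Rx rings attached to the vector are serviced in that order.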
+ * + * Returns the amount of work done + **/ +int ne6xvf_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + ring = q_vector->cq.ring; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = &adapter->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen, nsg; + bool jumbo_frame = true; + + tailen = 4; + + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(netdev, "TX: skb_cow_data() returned %d\n", nsg); + return nsg; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h new file mode 100644 index 00000000000000..0a10c04862a204 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6XVF_TXRX_H +#define _NE6XVF_TXRX_H + +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer); +int ne6xvf_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c new file mode 100644 index 00000000000000..a3c3303618efcb --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c @@ -0,0 +1,1123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" + +static int ne6xvf_sdk_send_msg_to_pf(struct ne6xvf_hw *hw, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen, + void *cmd_details) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + + mbx_buffer.snap.data[0] = 0; + mbx_buffer.snap.data[1] = 0; + mbx_buffer.snap.data[2] = 0; + mbx_buffer.snap.data[3] = 0; + mbx_buffer.snap.data[4] = 0; + mbx_buffer.snap.data[5] = 0; + + if (msglen) { + if (msglen > NE6XVF_SDK_LARGE_BUF) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_INVALID_SIZE; + } + + memcpy(mbx_buffer.snap.data, msg, msglen); + } + + mbx_buffer.snap.len = msglen; + mbx_buffer.snap.type = v_opcode; + mbx_buffer.snap.state = v_retval; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x2); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + + return 0; +} + +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int err; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = ne6xvf_sdk_send_msg_to_pf(hw, op, VIRTCHNL_STATUS_SUCCESS, msg, len, NULL); + if (err) + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %d, sdk_err %s\n", + op, err, hw->err_str); + + return err; +} + +/** + * ne6xvf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + enum ne6xvf_status ret_code = 0; + u64 val; + int i; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + if (!(val & 0x2)) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_NOT_READY; + } + + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + e->msg_len = min_t(u16, (u16)usnap.snap.len, e->buf_len); + if (e->msg_buf && e->msg_len != 0) { + for (i = 0; i < e->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) { + e->msg_buf[i] = usnap.snap.data[i]; + e->snap.data[i] = usnap.snap.data[i]; + } + } + + e->snap.type = usnap.snap.type; + e->snap.state = usnap.snap.state; + + if (pending) + *pending = 0; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return ret_code; +} + +/** + * ne6xvf_poll_virtchnl_msg - poll for virtchnl msg matching the requested_op + * @adapter: adapter structure + * @event: event to populate on success + * @op_to_poll: requested virtchnl op to poll for + */ +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll) +{ + struct ne6xvf_arq_event_info rece_event; + struct ne6xvf_hw *hw = &adapter->hw; + enum ne6xvf_status status, v_ret; + enum virtchnl_ops received_op; + int timeout = 50000; + int i; + + rece_event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE; + rece_event.msg_buf = kzalloc(rece_event.buf_len, GFP_KERNEL); + if (!rece_event.msg_buf) + return NE6XVF_ERR_NO_MEMORY; + + while (1) { + /* When the SDK is empty, ne6xvf_clean_arq_element will return + * nonzero and this loop will terminate. 
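+ * NE6XVF_ERR_NOT_READY is retried with a 10-12 us sleep for up to 50000
+ * attempts (roughly half a second) before giving up.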
+ */ + status = ne6xvf_clean_arq_element(hw, &rece_event, NULL); + if (status) { + if (status == NE6XVF_ERR_NOT_READY && timeout) { + usleep_range(10, 12); + timeout--; + continue; + } + kfree(rece_event.msg_buf); + return status; + } + + received_op = (enum virtchnl_ops)le32_to_cpu(rece_event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(rece_event.snap.state); + if (op_to_poll == received_op) { + memcpy(&event->snap, &rece_event.snap, + sizeof(struct ne6x_mbx_snap_buffer_data)); + event->msg_len = min(rece_event.msg_len, event->buf_len); + if (event->msg_buf) { + for (i = 0; i < event->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) + event->msg_buf[i] = rece_event.msg_buf[i]; + } + break; + } + + ne6xvf_virtchnl_completion(adapter, received_op, v_ret, rece_event.msg_buf, + rece_event.msg_len); + } + + kfree(rece_event.msg_buf); + status = (enum ne6xvf_status)le32_to_cpu(event->snap.state); + + return status; +} + +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter) +{ + int status; + + if (!adapter->vf_res) + return 0; + /* Don't check CURRENT_OP - this is always higher priority */ + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, + &adapter->vf_res->vsi_res[0].default_mac_addr[0], 6); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + return status; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct ne6xvf_virtchnl_version_info vvi; + + vvi.major = NE6XVF_VIRTCHNL_VERSION_MAJOR; + vvi.minor = NE6XVF_VIRTCHNL_VERSION_MINOR; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); + usleep_range(10, 12); + return ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_VERSION); +} + +/** + * ne6xvf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +static void ne6xvf_vf_parse_hw_config(struct ne6xvf_hw *hw, struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + + hw->dev_caps.max_mtu = msg->max_mtu; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * ne6xvf_get_vf_config + * @adapter: private adapter structure + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. 
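+ * adapter->vf_res must already be allocated by the caller, since the parsed
+ * queue and VSI information is written into it.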
+ **/ +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + int err; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + err = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_RESOURCES); + + hw->dev_caps.vf_id = event.msg_buf[0]; + hw->dev_caps.chip_id = 0x0; + hw->dev_caps.lport = event.msg_buf[1]; + hw->dev_caps.mac_id = event.msg_buf[2]; + hw->dev_caps.base_queue = event.msg_buf[3]; + hw->dev_caps.num_vf_per_pf = event.msg_buf[5]; + adapter->vf_res->num_vsis = 0x1; + adapter->vf_res->num_queue_pairs = event.msg_buf[4]; + adapter->vf_res->max_vectors = event.msg_buf[4]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + adapter->comm.port_info = hw->dev_caps.lport | (hw->dev_caps.vf_id << 8); + + dev_info(&adapter->pdev->dev, "vf %d Get Resource [ lport: %d, mac_id: %d, base: %d, queue: %d, err = %d]\n", + hw->dev_caps.vf_id, hw->dev_caps.lport, hw->dev_caps.mac_id, + hw->dev_caps.base_queue, adapter->vf_res->num_queue_pairs, err); + + ne6xvf_vf_parse_hw_config(hw, adapter->vf_res); + + return err; +} + +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + struct ne6x_vf_vlan vlan; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + event.buf_len = 0; + event.msg_buf = NULL; + + vlan = NE6X_VF_VLAN(0xfff, ETH_P_8021Q); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan, sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + + return 0; +} + +/** + * ne6xvf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the NE6XVF_ADMIN_QUEUE_ERROR_ statuses if not. 
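+ * On the initial request (b_init) a locally administered random MAC address
+ * (02:31:3a:xx:xx:xx) is generated and handed to the PF.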
+ **/ +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init) +{ + u8 mac_addr[ETH_ALEN]; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_CONFIG; + if (b_init) { + eth_random_addr(mac_addr); + mac_addr[0] = 0x02; + mac_addr[1] = 0x31; + mac_addr[2] = 0x3a; + } else { + memcpy(mac_addr, adapter->vf_res->vsi_res[0].default_mac_addr, 6); + } + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, mac_addr, 6); + + /* mac addr need get for PF */ + adapter->vf_res->vsi_res[0].default_mac_addr[0] = mac_addr[0]; + adapter->vf_res->vsi_res[0].default_mac_addr[1] = mac_addr[1]; + adapter->vf_res->vsi_res[0].default_mac_addr[2] = mac_addr[2]; + adapter->vf_res->vsi_res[0].default_mac_addr[3] = mac_addr[3]; + adapter->vf_res->vsi_res[0].default_mac_addr[4] = mac_addr[4]; + adapter->vf_res->vsi_res[0].default_mac_addr[5] = mac_addr[5]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + return 0; +} + +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter) +{ + adapter->current_op = VIRTCHNL_OP_CONFIG_OFFLOAD; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + dev_info(&adapter->pdev->dev, "adapter->hw_feature = 0x%08X\n", adapter->hw_feature); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_OFFLOAD, (u8 *)&adapter->hw_feature, 4); + + return 0; +} + +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter) +{ + int count, size = sizeof(struct ne6x_rss_info); + int index, status; + u8 *plut_info = (u8 *)&adapter->rss_info; + struct ne6xvf_arq_event_info event; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS; + + count = (size + NE6XVF_SDK_LARGE_BUF - 1) / NE6XVF_SDK_LARGE_BUF; + + for (index = 0; index < count; index++) { + event.buf_len = 0; + event.msg_buf = NULL; + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS, + &plut_info[index * NE6XVF_SDK_LARGE_BUF], + ((size - index * NE6XVF_SDK_LARGE_BUF) > + NE6XVF_SDK_LARGE_BUF) + ? 
NE6XVF_SDK_LARGE_BUF + : (size - index * NE6XVF_SDK_LARGE_BUF)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CONFIG_RSS); + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + event.msg_buf = NULL; + event.buf_len = 0; + + adapter->current_op = VIRTCHNL_OP_CHANGED_RSS; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CHANGED_RSS, (u8 *)&adapter->num_active_queues, + sizeof(adapter->num_active_queues)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CHANGED_RSS); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CHANGED_RSS; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request feature, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + adapter->current_op = VIRTCHNL_OP_GET_VF_FEATURE; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_FEATURE, NULL, 0); + + return 0; +} + +/** + * ne6xvf_request_stats + * @adapter: adapter structure + * + * Request VSI statistics from PF. + **/ +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter) +{ + ne6xvf_update_pf_stats(adapter); +} + +/** + * ne6xvf_request_queues + * @adapter: adapter structure + * @num: number of requested queues + * + * We get a default number of queues from the PF. This enables us to request a + * different number. Returns 0 on success, negative on failure + **/ +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num) +{ + struct ne6xvf_virtchnl_vf_res_request vfres; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + vfres.num_queue_pairs = 1; + vfres.need_reset = 0x0; + + adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; + adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED; + + return ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres, sizeof(vfres)); +} + +/** + * ne6xvf_enable_queues + * @adapter: adapter structure + * + * We get a default number of queues from the PF. This enables us to request a + * different number. 
Returns 0 on success, negative on failure + **/ +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, NULL, 0); + return 0; +} + +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + int status; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + status = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_FEATURE); + if (status == 0) { + adapter->hw_feature = event.snap.data[3]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[2]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[1]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[0]; + dev_info(&adapter->pdev->dev, "vf %d get feature 0x%08X\n", + adapter->hw.dev_caps.vf_id, adapter->hw_feature); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE; + kfree(event.msg_buf); + + return status; +} + +/** + * ne6xvf_add_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. + **/ +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct virtchnl_ether_addr_list *veal; + struct ne6xvf_mac_filter *f; + int len, i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + len = struct_size(veal, list, count); + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + f->add = false; + if (i == count) + break; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal->list[i].addr, 6); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_ETH_ADDR); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(veal); +} + +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_SET_VF_ADDR; + 
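+	/* The PF's reply is handled asynchronously in ne6xvf_virtchnl_completion(),
+	 * which updates netdev->dev_addr and wakes vc_waitqueue.
+	 */
+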
ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_SET_VF_ADDR, adapter->hw.mac.addr, 6); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_SET_VF_MAC; +} + +/** + * ne6xvf_del_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. + **/ +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct virtchnl_ether_addr_list *veal; + struct ne6xvf_mac_filter *f, *temp; + int len, i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + len = struct_size(veal, list, count); + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry_safe(f, temp, &adapter->mac_filter_list, list) { + if (f->remove) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal->list[i].addr, 6); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_ETH_ADDR); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(veal); +} + +#define NE6XVF_MAX_SPEED_STRLEN 13 + +/** + * ne6xvf_print_link_message - print link up or down + * @adapter: adapter structure + * + * Log a message telling the world of our wonderous link status + */ +static void ne6xvf_print_link_message(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int link_speed_mbps; + char *speed; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + + speed = kcalloc(1, NE6XVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + link_speed_mbps = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; + break; + default: + link_speed_mbps = SPEED_UNKNOWN; + break; + } + + snprintf(speed, NE6XVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps / 1000, "Gbps"); + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + + kfree(speed); +} + +/** + * ne6xvf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. 
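+ * The unicast and multicast promiscuous bits sent to the PF are derived from
+ * the NE6XVF_FLAG_PROMISC_ON and NE6XVF_FLAG_ALLMULTI_ON adapter flags.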
+ **/ +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_virtchnl_promisc_info vpi; + int flags = 0; + + dev_warn(&adapter->pdev->dev, "%s: ....\n", __func__); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + + if (adapter->flags & NE6XVF_FLAG_PROMISC_ON) { + adapter->hw_feature |= NE6X_F_PROMISC; + flags |= FLAG_VF_UNICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_PROMISC; + } + + if (adapter->flags & NE6XVF_FLAG_ALLMULTI_ON) { + adapter->hw_feature |= NE6X_F_RX_ALLMULTI; + flags |= FLAG_VF_MULTICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + } + + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi)); +} + +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + u8 msg[8] = {0}; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get_link_status, command %d pending\n", + adapter->current_op); + return; + } + + /* pass queue info to vf */ + msg[0] = hw->dev_caps.base_queue; + msg[1] = adapter->num_active_queues; + + adapter->current_op = VIRTCHNL_OP_GET_PORT_STATUS; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_PORT_STATUS, msg, 2); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS; +} + +/** + * ne6xvf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. + **/ +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen) +{ + struct net_device *netdev = adapter->netdev; + + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; + bool link_up = vpe->link_status; + enum ne6x_sdk_link_speed old_link_speed = adapter->link_speed; + + switch (vpe->event) { + case NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE: + adapter->link_speed = (vpe->link_speed_0 << 24) | + (vpe->link_speed_1 << 16) | + (vpe->link_speed_2 << 8) | + vpe->link_speed_3; + if (adapter->current_op == VIRTCHNL_OP_GET_PORT_STATUS) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* we've already got the right link status, bail */ + if (adapter->link_up == link_up) { + if (link_up && old_link_speed != adapter->link_speed) + ne6xvf_print_link_message(adapter); + + break; + } + + if (link_up) { + /* If we get link up message and start queues + * before our queues are configured it will + * trigger a TX hang. In that case, just ignore + * the link status message,we'll get another one + * after we enable queues and actually prepared + * to send traffic. + */ + if (adapter->state != __NE6XVF_RUNNING) + break; + + /* For ADQ enabled VF, we reconfigure VSIs and + * re-allocate queues. Hence wait till all + * queues are enabled. 
+ */ + if (adapter->flags & NE6XVF_FLAG_QUEUES_DISABLED) + break; + } + + adapter->link_up = link_up; + if (link_up) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + ne6xvf_print_link_message(adapter); + break; + case NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING: + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); + break; + default: + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", vpe->event); + break; + } + return; + } + + if (v_opcode == VIRTCHNL_OP_VF_CONFIG) { + struct virtchnl_vf_config *vfconfig = (struct virtchnl_vf_config *)msg; + + dev_info(&adapter->pdev->dev, "vf_vonfig_data from the PF,type= %d,value = %d\n", + vfconfig->type, vfconfig->data[0]); + switch (vfconfig->type) { + case VIRTCHNL_VF_CONFIG_TRUST: + adapter->trusted = vfconfig->data[0]; + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + break; + default: + break; + } + return; + } + + if (v_retval) { + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + dev_err(&adapter->pdev->dev, "Failed to change MAC address\n"); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op != VIRTCHNL_OP_SET_VF_ADDR) + return; + + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d to our request %d\n", + v_retval, v_opcode); + + /* Assume that the ADQ configuration caused one of the + * v_opcodes in this if statement to fail. Set the + * flag so the reset path can return to the pre-ADQ + * configuration and traffic can resume + */ + if ((v_opcode == VIRTCHNL_OP_ENABLE_QUEUES || + v_opcode == VIRTCHNL_OP_CONFIG_IRQ_MAP || + v_opcode == VIRTCHNL_OP_CONFIG_ADPT_QUEUES)) { + dev_err(&adapter->pdev->dev, + "ADQ is enabled and opcode %d failed (%d)\n", v_opcode, + v_retval); + netdev_reset_tc(netdev); + adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED; + ne6xvf_schedule_reset(adapter); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return; + } + } + } + + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + if (!v_retval) { + if (msglen != 0 && msg) { + netif_addr_lock_bh(netdev); + ether_addr_copy(adapter->hw.mac.addr, msg); + eth_hw_addr_set(netdev, msg); + netif_addr_unlock_bh(netdev); + } + } + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op == VIRTCHNL_OP_SET_VF_ADDR) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + memcpy(adapter->vf_res, msg, msglen); + ne6xvf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + netif_addr_lock_bh(netdev); + /* refresh current mac address if changed */ + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); + } + + ne6xvf_parse_vf_resource_msg(adapter); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + memcpy(&adapter->hw_feature, msg, 4); + dev_info(&adapter->pdev->dev, "%s: hw_featrue = 0x%08X\n", + ne6xvf_state_str(adapter->state), adapter->hw_feature); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + /* enable transmits */ + if (adapter->state == __NE6XVF_RUNNING) { + ne6xvf_irq_enable(adapter, true); + /* If queues not enabled when handling link event, + * then set carrier on now + */ + if 
(adapter->link_up && !netif_carrier_ok(netdev)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } + } + adapter->flags |= NE6XVF_FLAG_QUEUES_ENABLED; + adapter->flags &= ~NE6XVF_FLAG_QUEUES_DISABLED; + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + adapter->flags &= ~NE6XVF_FLAG_QUEUES_ENABLED; + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + + break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct ne6xvf_virtchnl_vf_res_request *vfres = + (struct ne6xvf_virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { + dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, vfres->num_queue_pairs); + adapter->num_req_queues = 0; + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + } + } break; + default: + if (adapter->current_op && v_opcode != adapter->current_op) + dev_dbg(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + + break; + } /* switch v_opcode */ + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * ne6xvf_add_vlans + * @adapter: adapter structure + * + * Request that the PF add one or more VLAN filters to our VSI. + **/ +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f = NULL; + struct ne6x_vf_vlan *vlan = NULL; + int len = 0, i = 0, count = 0; + + dev_info(&adapter->pdev->dev, "%s: adapter->current_op:%d\n", __func__, + adapter->current_op); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct ne6x_vf_vlan) * count; + vlan = kzalloc(len, GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + f->add = false; + f->is_new_vlan = true; + if (i == count) + break; + } + } + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} + +/** + * ne6xvf_del_vlans + * @adapter: adapter structure + * + * Request that the PF remove one or more VLAN filters from our VSI. 
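+ * Filters marked for removal are also dropped from the local filter list.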
+ **/ +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f, *ftmp; + struct ne6x_vf_vlan *vlan = NULL; + int i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + vlan = kcalloc(count, sizeof(*vlan), GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h new file mode 100644 index 00000000000000..1fae0b1922dccc --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_VIRTCHNL_H +#define _NE6XVF_VIRTCHNL_H + +#define NE6XVF_SDK_LARGE_BUF 6 + +struct ne6xvf_spinlock { + /* mutext lock */ + struct mutex spinlock; +}; + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[]; +}; + +enum nacf_virtchnl_vsi_type { + NE6XVF_VIRTCHNL_VSI_TYPE_INVALID = 0, + NE6XVF_VIRTCHNL_VSI_SRIOV = 6, +}; + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[]; +}; + +struct ne6xvf_arq_event_info { + struct ne6x_mbx_snap_buffer_data snap; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* VF resource request */ +struct ne6xvf_virtchnl_vf_res_request { + u16 num_queue_pairs; + u8 need_reset; + u8 rsv; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. 
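+ * flags is a bitmask of FLAG_VF_UNICAST_PROMISC and FLAG_VF_MULTICAST_PROMISC.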
+ * Note: we assume that broadcast accept mode is always enabled. + */ +struct ne6xvf_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +struct ne6xvf_sdk_mbx_info { + struct ne6xvf_spinlock mbx_spinlock; + struct ne6x_mbx_snap_buffer_data sq_data; + struct ne6x_mbx_snap_buffer_data cq_data; + int init_flag; +}; + +#define NE6XVF_VIRTCHNL_VERSION_MAJOR 1 +#define NE6XVF_VIRTCHNL_VERSION_MINOR 1 + +struct ne6xvf_virtchnl_version_info { + u8 major; + u8 minor; +}; + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum ne6xvf_virtchnl_event_codes { + NE6XVF_VIRTCHNL_EVENT_UNKNOWN = 0, + NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE, + NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING, + NE6XVF_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + NE6XVF_VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u8 link_speed_0; + u8 link_speed_1; + u8 link_speed_2; + u8 link_speed_3; + u8 link_status; +}; + +#endif diff --git a/drivers/net/ethernet/guangruntong/Kconfig b/drivers/net/ethernet/guangruntong/Kconfig new file mode 100644 index 00000000000000..cc6a729b78da08 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Guangruntong network device configuration +# + +config NET_VENDOR_GRT + bool "Guanruntong devices" + depends on PCI + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Intel cards. If you say Y, you will be asked for + your specific card in the following questions. + +config GRTNIC + tristate "Guangruntong PCI Express adapters support" + depends on NET_VENDOR_GRT + help + This driver supports Guangruntong PCI Express family of + adapters. diff --git a/drivers/net/ethernet/guangruntong/Makefile b/drivers/net/ethernet/guangruntong/Makefile new file mode 100644 index 00000000000000..d806bde47f16d3 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/Makefile @@ -0,0 +1,245 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 1999 - 2021 Intel Corporation. + +ifneq ($(KERNELRELEASE),) +# kbuild part of makefile +# +# Makefile for the Intel(R) 10GbE PCI Express Linux Network Driver +# + +obj-$(CONFIG_GRTNIC_XGB) += grtnic_xgb.o + +define grtnic_xgb-y + grtnic_main.o + grtnic_netdev.o + grtnic_macphy.o + grtnic_param.o + grtnic_nvm.o + grtnic_ethtool.o + grtnic_proc.o +endef +grtnic_xgb-y := $(strip ${grtnic_xgb-y}) +grtnic_xgb-y += kcompat.o + +else # ifneq($(KERNELRELEASE),) +# normal makefile + +DRIVER := grtnic_xgb + +# Check that kernel version is at least 2.6.0, since we don't support 2.4.x +# kernels with the grtnic driver. We can't use minimum_kver_check since SLES 10 +# SP4's Make has a bug which causes $(eval) inside an ifeq conditional to error +# out. This was fixed in Make 3.81, but SLES 10 SP4 does not have a fix for +# this yet. +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,0) ]; echo "$?")) + $(warning *** Aborting the build.) 
+ $(error This driver is not supported on kernel versions older than 2.6.0) +endif + +###################### +# Kernel Build Macro # +###################### + +# customized kernelbuild function +# +# ${1} is the kernel build target +# ${2} may contain extra rules to pass to kernelbuild macro +# +# We customize the kernelbuild target in order to provide our hack to disable +# CONFIG_PTP_1588_CLOCK support should -DNO_PTP_SUPPORT be defined in the extra +# cflags given on the command line. +devkernelbuild = $(call kernelbuild,$(if $(filter -DNO_PTP_SUPPORT,${EXTRA_CFLAGS}),CONFIG_PTP_1588_CLOCK=n) ${2},${1}) + +# Command to update initramfs or display a warning message +ifeq (${cmd_initrd},) +define cmd_initramfs +@echo "Unable to update initramfs. You may need to do this manually." +endef +else +define cmd_initramfs +@echo "Updating initramfs..." +-@$(call cmd_initrd) +endef +endif + +############### +# Build rules # +############### + +# Standard compilation, with regular output +default: + @+$(call devkernelbuild,modules) + +# Noisy output, for extra debugging +noisy: + @+$(call devkernelbuild,modules,V=1) + +# Silence any output generated +silent: + @+$(call devkernelbuild,modules,>/dev/null) + +# Enable higher warning level +checkwarnings: clean + @+$(call devkernelbuild,modules,W=1) + +# Run sparse static analyzer +sparse: clean + @+$(call devkernelbuild,modules,C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext") + +# Run coccicheck static analyzer +ccc: clean + @+$(call devkernelbuild,modules,coccicheck MODE=report)) + +# Build manfiles +manfile: + @gzip -c ../${DRIVER}.${MANSECTION} > ${DRIVER}.${MANSECTION}.gz + +# Clean the module subdirectories +clean: + @+$(call devkernelbuild,clean) + @-rm -rf *.${MANSECTION}.gz *.ko + +kylin: EXTRA_CFLAGS += -DKYLIN_KERNEL +kylin: + @echo "Compile for Kylin kernel..." + @+$(call devkernelbuild,modules) + +kylin44: EXTRA_CFLAGS += -DKYLIN_KERNEL44 +kylin44: + @echo "Compile for Kylin4.4 kernel..." + @+$(call devkernelbuild,modules) + +uos: EXTRA_CFLAGS += -DUOS_KERNEL +uos: + @echo "Compile for Uos kernel..." + @+$(call devkernelbuild,modules) + +euler: EXTRA_CFLAGS += -DEULER_KERNEL +euler: + @echo "Compile for OpenEuler kernel..." + @+$(call devkernelbuild,modules) + + +# Install the modules and manpage +mandocs_install: manfile + @echo "Copying manpages..." + @install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz + +# Install kernel module files. This target is called by the RPM specfile +# when generating binary RPMs, and is not expected to modify files outside +# of the build root. Thus, it must not update initramfs, or run depmod. +modules_install: default + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +kylin_modules_install: kylin + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +kylin44_modules_install: kylin44 + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +uos_modules_install: uos + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +euler_modules_install: euler + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +# After installing all the files, perform necessary work to ensure the +# system will use the new modules. This includes running depmod to update +# module dependencies and updating the initramfs image in case the module is +# loaded during early boot. +install: modules_install #mandocs_install + @echo "Running depmod..." 
+ $(call cmd_depmod) + $(call cmd_initramfs) + +kylin_install: kylin_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +kylin44_install: kylin44_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +uos_install: uos_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +euler_install: euler_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +mandocs_uninstall: + if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \ + rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \ + fi; + +# Remove installed module files. This target is called by the RPM specfile +# when generating binary RPMs, and is not expected to modify files outside +# of the build root. Thus, it must not update the initramfs image or run +# depmod. +modules_uninstall: + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko; + +# After uninstalling all the files, perform necessary work to restore the +# system back to using the default kernel modules. This includes running +# depmod to update module dependencies and updating the initramfs image. +uninstall: modules_uninstall + $(call cmd_depmod) + $(call cmd_initramfs) + +######## +# Help # +######## +help: + @echo 'Build targets:' + @echo ' default - Build module(s) with standard verbosity' + @echo ' kylin - Build module(s) for kylin Kernel' + @echo ' kylin44 - Build module(s) for kylin 4.4 Kernel' + @echo ' uos - Build module(s) for uos Kernel' + @echo ' euler - Build module(s) for euler Kernel' + @echo ' noisy - Build module(s) with V=1 verbosity -- very noisy' + @echo ' silent - Build module(s), squelching all output' + @echo '' + @echo 'Static Analysis:' + @echo ' checkwarnings - Clean, then build module(s) with W=1 warnings enabled' + @echo ' sparse - Clean, then check module(s) using sparse' + @echo ' ccc - Clean, then check module(s) using coccicheck' + @echo '' + @echo 'Cleaning targets:' + @echo ' clean - Clean files generated by kernel module build' + @echo '' + @echo 'Other targets:' + @echo ' manfile - Generate a gzipped manpage' + @echo ' modules_install - Install the module(s) only' + @echo ' mandocs_install - Install the manpage only' + @echo ' install - Build then install the module(s) and manpage, and update initramfs' + @echo ' kylin_install - Build then install kylin module(s) and update initramfs' + @echo ' kylin44_install - Build then install kylin 4.4 module(s) and update initramfs' + @echo ' uos_install - Build then install uos module(s) and update initramfs' + @echo ' euler_install - Build then install OpenEuler module(s) and update initramfs' + @echo ' modules_uninstall - Uninstall the module(s) only' + @echo ' mandocs_uninstall - Uninstall the manpage only' + @echo ' uninstall - Uninstall the module(s) and manpage, and update initramfs' + @echo ' help - Display this help message' + @echo '' + @echo 'Variables:' + @echo ' LINUX_VERSION - Debug tool to force kernel LINUX_VERSION_CODE. Use at your own risk.' 
+ @echo ' W=N - Kernel variable for setting warning levels' + @echo ' V=N - Kernel variable for setting output verbosity' + @echo ' INSTALL_MOD_PATH - Add prefix for the module and manpage installation path' + @echo ' INSTALL_MOD_DIR - Use module directory other than updates/drivers/net/ethernet/intel/${DRIVER}' + @echo ' Other variables may be available for tuning make process, see' + @echo ' Kernel Kbuild documentation for more information' + +.PHONY: default noisy clean manfile silent sparse ccc install uninstall help + +endif # ifneq($(KERNELRELEASE),) diff --git a/drivers/net/ethernet/guangruntong/auxiliary_compat.h b/drivers/net/ethernet/guangruntong/auxiliary_compat.h new file mode 100644 index 00000000000000..d8b9c9e1dc4443 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/auxiliary_compat.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2024 Intel Corporation */ + +#ifndef _AUXILIARY_COMPAT_H_ +#define _AUXILIARY_COMPAT_H_ + +/* This file contains only the minimal set of kernel compatibility backports + * required by auxiliary.c to build. It is similar to the kcompat.h file, but + * reduced to an absolute minimum in order to reduce the risk of generating + * different kernel symbol CRC values at build time. + * + * For a detailed discussion of kernel symbol CRCs, please read: + * + * Documentation/kernel-symbol-crc.rst + * + * Include only the minimum required kernel compatibility implementations from + * kcompat_generated_defs.h and kcompat_impl.h. If a new fix is required, + * please first implement it as part of the kcompat project before porting it + * to this file. + * + * The current list of required implementations is: + * + * NEED_BUS_FIND_DEVICE_CONST_DATA + * NEED_DEV_PM_DOMAIN_ATTACH + * NEED_DEV_PM_DOMAIN_DETACH + * + * Note that kernels since v5.11 support auxiliary as a built-in config + * option. Using this is always preferred to using an out-of-tree module when + * available. + */ + +#include "kcompat_generated_defs.h" + +/**************************** + * Backport implementations * + ****************************/ + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +/* NEED_BUS_FIND_DEVICE_CONST_DATA + * + * bus_find_device() was updated in upstream commit 418e3ea157ef + * ("bus_find_device: Unify the match callback with class_find_device") + * to take a const void *data parameter and also have the match() function + * passed in take a const void *data parameter. + * + * all of the kcompat below makes it so the caller can always just call + * bus_find_device() according to the upstream kernel without having to worry + * about const vs. non-const arguments. 
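+ *
+ * For example (illustrative only -- the names are hypothetical), a caller
+ * written against the upstream prototype keeps working unchanged:
+ *
+ *	static int match_name(struct device *dev, const void *data)
+ *	{
+ *		return !strcmp(dev_name(dev), data);
+ *	}
+ *
+ *	dev = bus_find_device(&some_bus_type, NULL, "foo", match_name);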
+ */ +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int _kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +/* force callers of bus_find_device() to call _kc_bus_find_device() on kernels + * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined + */ +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */ + +#if defined(NEED_DEV_PM_DOMAIN_ATTACH) && defined(NEED_DEV_PM_DOMAIN_DETACH) +#include +/* NEED_DEV_PM_DOMAIN_ATTACH and NEED_DEV_PM_DOMAIN_DETACH + * + * dev_pm_domain_attach() and dev_pm_domain_detach() were added in upstream + * commit 46420dd73b80 ("PM / Domains: Add APIs to attach/detach a PM domain for + * a device"). To support older kernels and OSVs that don't have these API, just + * implement how older versions worked by directly calling acpi_dev_pm_attach() + * and acpi_dev_pm_detach(). + */ +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + if (dev->pm_domain) + return 0; + + if (ACPI_HANDLE(dev)) + return acpi_dev_pm_attach(dev, true); + + return 0; +} + +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) +{ + if (ACPI_HANDLE(dev)) + acpi_dev_pm_detach(dev, true); +} +#else /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ +/* it doesn't make sense to compat only one of these functions, and it is + * likely either a failure in kcompat-generator.sh or a failed distribution + * backport if this occurs. Don't try to support it. + */ +#ifdef NEED_DEV_PM_DOMAIN_ATTACH +#error "NEED_DEV_PM_DOMAIN_ATTACH defined but NEED_DEV_PM_DOMAIN_DETACH not defined???" +#endif /* NEED_DEV_PM_DOMAIN_ATTACH */ +#ifdef NEED_DEV_PM_DOMAIN_DETACH +#error "NEED_DEV_PM_DOMAIN_DETACH defined but NEED_DEV_PM_DOMAIN_ATTACH not defined???" 
+#endif /* NEED_DEV_PM_DOMAIN_DETACH */ +#endif /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ + +#endif /* _AUXILIARY_COMPAT_H_ */ diff --git a/drivers/net/ethernet/guangruntong/common.mk b/drivers/net/ethernet/guangruntong/common.mk new file mode 100755 index 00000000000000..f8d9e3c8d6f31e --- /dev/null +++ b/drivers/net/ethernet/guangruntong/common.mk @@ -0,0 +1,462 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +# +# common Makefile rules useful for out-of-tree Linux driver builds +# +# Usage: include common.mk +# +# After including, you probably want to add a minimum_kver_check call +# +# Required Variables: +# DRIVER +# -- Set to the lowercase driver name + +##################### +# Helpful functions # +##################### + +SHELL := $(shell which bash) +src ?= $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +readlink = $(shell readlink -f ${1}) + +# helper functions for converting kernel version to version codes +get_kver = $(or $(word ${2},$(subst ., ,${1})),0) +get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \ + [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \ + [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \ + printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) ) + +################ +# depmod Macro # +################ + +cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \ + $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \ + -a ${KVER} + +##################### +# Environment tests # +##################### + +DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]") + +ifeq (,${BUILD_KERNEL}) +BUILD_KERNEL=$(shell uname -r) +endif + +# Kernel Search Path +# All the places we look for kernel source +KSP := /lib/modules/${BUILD_KERNEL}/source \ + /lib/modules/${BUILD_KERNEL}/build \ + /usr/src/linux-${BUILD_KERNEL} \ + /usr/src/kernel-headers-${BUILD_KERNEL} \ + /usr/src/kernel-source-${BUILD_KERNEL} \ + /usr/src/linux \ + /usr/src/kernels/${BUILD_KERNEL} \ + /usr/src/kernels + +# prune the list down to only values that exist and have an include/linux +# sub-directory. We can't use include/config because some older kernels don't +# have this. +test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir}) +KSP := $(foreach dir, ${KSP}, ${test_dir}) + +# we will use this first valid entry in the search path +ifeq (,${KSRC}) + KSRC := $(firstword ${KSP}) +endif + +ifeq (,${KSRC}) + $(warning *** Kernel header files not in any of the expected locations.) + $(warning *** Install the appropriate kernel development package, e.g.) + $(error kernel-devel, for building kernel modules and try again) +else +ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC}) + KOBJ := /lib/modules/${BUILD_KERNEL}/build +else + KOBJ := ${KSRC} +endif +endif + +SCRIPT_PATH := ${KSRC}/scripts +info_signed_modules = + +ifeq (,${SCRIPT_PATH}) + info_signed_modules += echo "*** Could not find sign-file script. Cannot sign driver." 
; +else + SIGN_FILE_EXISTS := $(or $(and $(wildcard $(SCRIPT_PATH)/sign-file),1),) + PRIV_KEY_EXISTS := $(or $(and $(wildcard intel-linux-key.key),1),) + PUB_KEY_EXISTS := $(or $(and $(wildcard intel-linux-key.crt),1),) +ifneq ($(and $(SIGN_FILE_EXISTS),$(PRIV_KEY_EXISTS),$(PUB_KEY_EXISTS)),) + info_signed_modules += \ + echo "*** Is sign-file present: ${SIGN_FILE_EXISTS}" ; \ + echo "*** Is private key present: ${PRIV_KEY_EXISTS}" ; \ + echo "*** Is public key present: ${PUB_KEY_EXISTS}" ; + info_signed_modules += echo "*** All files are present, signing driver." ; + sign_driver = $(shell ${SCRIPT_PATH}/sign-file sha256 intel-linux-key.key \ + intel-linux-key.crt ${DRIVER}.ko) +else + info_signed_modules += echo "*** Files are missing, cannot sign driver." ; + sign_driver = +endif +endif + +# Version file Search Path +VSP := ${KOBJ}/include/generated/utsrelease.h \ + ${KOBJ}/include/linux/utsrelease.h \ + ${KOBJ}/include/linux/version.h \ + ${KOBJ}/include/generated/uapi/linux/version.h \ + /boot/vmlinuz.version.h + +# Config file Search Path +CSP := ${KOBJ}/include/generated/autoconf.h \ + ${KOBJ}/include/linux/autoconf.h \ + /boot/vmlinuz.autoconf.h + +# System.map Search Path (for depmod) +MSP := ${KSRC}/System.map \ + /usr/lib/debug/boot/System.map-${BUILD_KERNEL} \ + /boot/System.map-${BUILD_KERNEL} + +# prune the lists down to only files that exist +test_file = $(shell [ -f ${1} ] && echo ${1}) +VSP := $(foreach file, ${VSP}, $(call test_file,${file})) +CSP := $(foreach file, ${CSP}, $(call test_file,${file})) +MSP := $(foreach file, ${MSP}, $(call test_file,${file})) + + +# and use the first valid entry in the Search Paths +ifeq (,${VERSION_FILE}) + VERSION_FILE := $(firstword ${VSP}) +endif + +ifeq (,${CONFIG_FILE}) + CONFIG_FILE := $(firstword ${CSP}) +endif + +ifeq (,${SYSTEM_MAP_FILE}) + SYSTEM_MAP_FILE := $(firstword ${MSP}) +endif + +ifeq (,$(wildcard ${VERSION_FILE})) + $(error Linux kernel source not configured - missing version header file) +endif + +ifeq (,$(wildcard ${CONFIG_FILE})) + $(error Linux kernel source not configured - missing autoconf.h) +endif + +ifeq (,$(wildcard ${SYSTEM_MAP_FILE})) + $(warning Missing System.map file - depmod will not check for missing symbols during module installation) +endif + +ifneq ($(words $(subst :, ,$(CURDIR))), 1) + $(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. 
Rename directory or move sources to another path) +endif + +######################## +# Extract config value # +######################## + +get_config_value = $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\ + grep -m 1 ${1} | awk '{ print $$3 }') + +################ +# dracut Macro # +################ + +cmd_initrd := $(shell \ + if [[ ${KOBJ} != /lib/modules/${BUILD_KERNEL}/* ]]; then \ + echo ""; \ + elif which dracut > /dev/null 2>&1 ; then \ + echo "dracut --force --kver ${BUILD_KERNEL}"; \ + elif which update-initramfs > /dev/null 2>&1 ; then \ + echo "update-initramfs -u -k ${BUILD_KERNEL}"; \ + fi ) + +######################## +# Check module signing # +######################## + +CONFIG_MODULE_SIG_ALL := $(call get_config_value,CONFIG_MODULE_SIG_ALL) +CONFIG_MODULE_SIG_FORCE := $(call get_config_value,CONFIG_MODULE_SIG_FORCE) +CONFIG_MODULE_SIG_KEY := $(call get_config_value,CONFIG_MODULE_SIG_KEY) + +SIG_KEY_SP := ${KOBJ}/${CONFIG_MODULE_SIG_KEY} \ + ${KOBJ}/certs/signing_key.pem + +SIG_KEY_FILE := $(firstword $(foreach file, ${SIG_KEY_SP}, $(call test_file,${file}))) + +# print a warning if the kernel configuration attempts to sign modules but +# the signing key can't be found. +ifneq (${SIG_KEY_FILE},) +warn_signed_modules := : ; +else +warn_signed_modules := +ifeq (${CONFIG_MODULE_SIG_ALL},1) +warn_signed_modules += \ + echo "*** The target kernel has CONFIG_MODULE_SIG_ALL enabled, but" ; \ + echo "*** the signing key cannot be found. Module signing has been" ; \ + echo "*** disabled for this build." ; +endif # CONFIG_MODULE_SIG_ALL=y +ifeq (${CONFIG_MODULE_SIG_FORCE},1) + warn_signed_modules += \ + echo "warning: The target kernel has CONFIG_MODULE_SIG_FORCE enabled," ; \ + echo "warning: but the signing key cannot be found. The module must" ; \ + echo "warning: be signed manually using 'scripts/sign-file'." ; +endif # CONFIG_MODULE_SIG_FORCE +DISABLE_MODULE_SIGNING := Yes +endif + +####################### +# Linux Version Setup # +####################### + +# The following command line parameter is intended for development of KCOMPAT +# against upstream kernels such as net-next which have broken or non-updated +# version codes in their Makefile. They are intended for debugging and +# development purpose only so that we can easily test new KCOMPAT early. If you +# don't know what this means, you do not need to set this flag. There is no +# arcane magic here. + +# Convert LINUX_VERSION into LINUX_VERSION_CODE +ifneq (${LINUX_VERSION},) + LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3)) +endif + +# Honor LINUX_VERSION_CODE +ifneq (${LINUX_VERSION_CODE},) + $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.) + KVER_CODE := ${LINUX_VERSION_CODE} + EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE} +endif + +# Determine SLE_KERNEL_REVISION for SuSE SLE >= 11 (needed by kcompat) +# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string +# appended to the stable kernel version on which their kernel is based with +# additional versioning information (up to 3 numbers), a possible abbreviated +# git SHA1 commit id and a kernel type, e.g. 
CONFIG_LOCALVERSION=-1.2.3-default +# or CONFIG_LOCALVERSION=-999.gdeadbee-default +# SLE >= 15SP3 added additional information about version and service pack +# to their kernel version e.g CONFIG_LOCALVERSION=-150300.59.43.1-default +# +# SLE_LOCALVERSION_CODE is also exported to support legacy kcompat.h +# definitions. +ifeq (1,$(call get_config_value,CONFIG_SUSE_KERNEL)) + +ifneq (10,$(call get_config_value,CONFIG_SLE_VERSION)) + + CONFIG_LOCALVERSION := $(call get_config_value,CONFIG_LOCALVERSION) + LOCALVERSION := $(shell echo ${CONFIG_LOCALVERSION} | \ + cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//') + LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1) +ifeq ($(shell test ${LOCALVER_A} -gt 65535; echo $$?),0) + LOCAL_VER_MAJOR := $(shell echo ${LOCALVER_A:0:3}) + LOCAL_VER_MINOR := $(shell echo ${LOCALVER_A:3:3}) + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + LOCALVER_D := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f4) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_B} \* 65536 + \ + 0${LOCALVER_C} \* 256 + 0${LOCALVER_D}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_B} +else + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \ + 0${LOCALVER_B} \* 256 + 0${LOCALVER_C}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_A} +endif +endif +endif + +EXTRA_CFLAGS += ${CFLAGS_EXTRA} + +# get the kernel version - we use this to find the correct install path +KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \ + awk '{ print $$3 }' | sed 's/\"//g') + +# assume source symlink is the same as build, otherwise adjust KOBJ +ifneq (,$(wildcard /lib/modules/${KVER}/build)) + ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build)) + KOBJ=/lib/modules/${KVER}/build + endif +endif + +ifeq (${KVER_CODE},) + KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\ + grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g') +endif + +# minimum_kver_check +# +# helper function to provide uniform output for different drivers to abort the +# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)". +define _minimum_kver_check +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?")) + $$(warning *** Aborting the build.) + $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3}) +endif +endef +minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3})) + +############################# +# kcompat definitions setup # +############################# + +# In most cases, kcompat flags can be checked within the driver source files +# using simple CPP checks. However, it may be necessary to check for a flag +# value within the Makefile for some specific edge cases. For example, if an +# entire feature ought to be excluded on some kernels due to missing +# functionality. +# +# To support this, kcompat_defs.h is preprocessed and converted into a word list +# that can be checked to determine whether a given kcompat feature flag will +# be defined for this kernel. 
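+#
+# A hypothetical Makefile-level check of that word list (the variable and
+# flag names below are illustrative only and are not defined in this file)
+# might look like:
+#
+#   ifneq (,$(filter NEED_SOME_FEATURE,${KCOMPAT_DEFINITIONS}))
+#   EXTRA_CFLAGS += -DDISABLE_SOME_FEATURE
+#   endif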
+# + +# call script that populates defines automatically +$(if $(shell \ + $(if $(findstring 1,${V}),,QUIET_COMPAT=1) \ + KSRC=${KSRC} OUT=${src}/kcompat_generated_defs.h CONFIG_FILE=${CONFIG_FILE} \ + bash ${src}/kcompat-generator.sh && echo ok), , $(error kcompat-generator.sh failed)) + +################ +# Manual Pages # +################ + +MANSECTION = 7 + +ifeq (,${MANDIR}) + # find the best place to install the man page + MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g') + ifneq (,${MANPATH}) + # test based on inclusion in MANPATH + test_dir = $(findstring ${dir}, ${MANPATH}) + else + # no MANPATH, test based on directory existence + test_dir = $(shell [ -e ${dir} ] && echo ${dir}) + endif + # our preferred install path + # should /usr/local/man be in here ? + MANDIR := /usr/share/man /usr/man + MANDIR := $(foreach dir, ${MANDIR}, ${test_dir}) + MANDIR := $(firstword ${MANDIR}) +endif +ifeq (,${MANDIR}) + # fallback to /usr/man + MANDIR := /usr/man +endif + +#################### +# CCFLAGS variable # +#################### + +# set correct CCFLAGS variable for kernels older than 2.6.24 +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?)) +CCFLAGS_VAR := EXTRA_CFLAGS +else +CCFLAGS_VAR := ccflags-y +endif + +################# +# KBUILD_OUTPUT # +################# + +# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ +ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ})) +export KBUILD_OUTPUT ?= ${KOBJ} +endif + +############################ +# Module Install Directory # +############################ + +# Default to using updates/drivers/net/ethernet/intel/ path, since depmod since +# v3.1 defaults to checking updates folder first, and only checking kernels/ +# and extra afterwards. We use updates instead of kernel/* due to desire to +# prevent over-writing built-in modules files. +export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER} + +################# +# Auxiliary Bus # +################# + +# If the check_aux_bus script exists, then this driver depends on the +# auxiliary module. Run the script to determine if we need to include +# auxiliary files with this build. +CHECK_AUX_BUS ?= ../scripts/check_aux_bus +ifneq ($(call test_file,${CHECK_AUX_BUS}),) +NEED_AUX_BUS := $(shell ${CHECK_AUX_BUS} --ksrc="${KSRC}" --build-kernel="${BUILD_KERNEL}" >/dev/null 2>&1; echo $$?) +endif # check_aux_bus exists + +# The out-of-tree auxiliary module we ship should be moved into this +# directory as part of installation. +export INSTALL_AUX_DIR ?= updates/drivers/net/ethernet/intel/auxiliary + +# If we're installing auxiliary bus out-of-tree, the following steps are +# necessary to ensure the relevant files get put in place. 
+AUX_BUS_HEADERS ?= linux/auxiliary_bus.h auxiliary_compat.h kcompat_generated_defs.h +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_install + install -D -m 644 Module.symvers ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/intel_auxiliary.symvers + install -d ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR} + mv -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/intel_auxiliary.ko* \ + ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/ + install -d ${INSTALL_MOD_PATH}/${KSRC}/include/linux + install -D -m 644 ${AUX_BUS_HEADERS} -t ${INSTALL_MOD_PATH}/${KSRC}/include/linux +endef +else +auxiliary_post_install = +endif + +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_uninstall + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/intel_auxiliary.symvers + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/intel_auxiliary.ko* + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_compat.h + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/kcompat_generated_defs.h +endef +else +auxiliary_post_uninstall = +endif + +ifeq (${NEED_AUX_BUS},2) +EXTRA_CFLAGS += -DUSE_INTEL_AUX_BUS +endif +###################### +# Kernel Build Macro # +###################### + +# kernel build function +# ${1} is the kernel build target +# ${2} may contain any extra rules to pass directly to the sub-make process +# +# This function is expected to be executed by +# @+$(call kernelbuild,,) +# from within a Makefile recipe. +# +# The following variables are expected to be defined for its use: +# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem +# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS +# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable +# KSRC -- the location of the kernel source tree to build against +# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER +# W -- if set, enables the W= kernel warnings options +# C -- if set, enables the C= kernel sparse build options +# +kernelbuild = ${Q}$(call warn_signed_modules) \ + ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \ + ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \ + -C "${KSRC}" \ + CONFIG_${DRIVER_UPPERCASE}=m \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG=n) \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG_ALL=) \ + M="${CURDIR}" \ + $(if ${W},W="${W}") \ + $(if ${C},C="${C}") \ + $(if ${NEED_AUX_BUS},NEED_AUX_BUS="${NEED_AUX_BUS}") \ + ${2} ${1} diff --git a/drivers/net/ethernet/guangruntong/dma_add.h b/drivers/net/ethernet/guangruntong/dma_add.h new file mode 100755 index 00000000000000..2b4794cd3c3a04 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/dma_add.h @@ -0,0 +1,57 @@ +#ifndef GRTDMA_H +#define GRTDMA_H + +// Target +#define TARGET_H2C 0 +#define TARGET_C2H 1 +#define TARGET_IRQ 2 +#define TARGET_CONFIG 3 + +#define TARGET_MSIX 8 + +//TARGET_H2C & TARGET_C2H +// Writable addresses +#define ADDR_SG_SWPT 0 +#define ADDR_SG_ADDRLO 1 +#define ADDR_SG_ADDRHI 2 +#define ADDR_SG_MAXNUM 3 +#define ADDR_ENGINE_CTRL 4 + +#define ADDR_DESC_CTRL 5 //WTHRESH, PTHRESH, HTHRESH +#define ADDR_INT_DELAY 6 //Write back & Interrupt Delay Value TIDV RDTR + +#define ADDR_SG_WBADDRLO 7 +#define ADDR_SG_WBADDRHI 8 + +#define ADDR_DCA_RXTXCTL 9 + +// Readable Addresses +#define ADDR_SG_HWPT 16 + + +//TARGET_IRQ +// Writable addresses +#define ADDR_INTR_ICS 0 //Interrupt Cause Set Register 1:This registers allows triggering an immediate interrupt by 
software; writing 1 activates the corresponding interrupt
+#define ADDR_INTR_IMS 1 //interrupt mask set: 1 enables the corresponding interrupt, 0 has no effect. To mask an interrupt, use IMC instead
+#define ADDR_INTR_IMC 2 //interrupt mask clear: 1 disables the corresponding interrupt, 0 has no effect
+#define ADDR_INTR_IAM 3 //interrupt auto mask: when the IAME (interrupt acknowledge auto-mask enable) bit is 1, a read or write of ICR has the side effect of copying the IAM register value into IMC. When the bit is 0b, this feature is disabled
+#define ADDR_INTR_MODE 4 //interrupt mode c2s:s2c; 4 bits of the first 16 bits select the channel, 2 bits of the following 16 bits select the mode, 1: c2s eop interrupt, 0: s2c normal interrupt
+
+#define ADDR_INTR_ITR 5 //MSI-X: one entry per interrupt, i.e. channel*2 (RX/TX) + 1 (Other). 32-bit value: the first 16 bits (only 4 bits used, 32 interrupts at most) select the vector, the rest is the ITR value. Without MSI-X support the value lives in the first vector
+ //The interval is specified in 256 ns increments. Zero disables interrupt throttling logic.
+
+#define ADD_INTR_IVAR 6
+#define ADD_INTR_IVAR_MISC 7
+
+
+// Readable Addresses
+#define ADDR_INTR_VECTOR 8
+
+//TARGET_CONF
+#define ADDR_CORESETTINGS 0
+#define ADDR_FPGA_NAME 1
+
+#define ADDR_DCA_GTCL 3
+#define ADDR_FUNC_RST 4
+
+#endif /* GRTDMA_H */
diff --git a/drivers/net/ethernet/guangruntong/grtnic.h b/drivers/net/ethernet/guangruntong/grtnic.h
new file mode 100755
index 00000000000000..57e5693b2c8a07
--- /dev/null
+++ b/drivers/net/ethernet/guangruntong/grtnic.h
@@ -0,0 +1,1209 @@
+/*
+
+Copyright (c) 2018 Alex Forencich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+ +*/ + +#ifndef GRTNIC_CORE_H +#define GRTNIC_CORE_H + +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include + +#define CONFIG_KEYLIN_OS 1 + +//#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC +//#define HAVE_SWIOTLB_SKIP_CPU_SYNC //飞腾ARM平台可以强制打开提高性能 +//#endif + +#include "kcompat.h" +#include "dma_add.h" + +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +//#define CONFIG_DISABLE_PACKET_SPLIT //龙芯平台可以需要打开这个开关 +#define GRTNIC_NO_LRO + +#define DRIVER_NAME "grtnic_xgb" +#define DRIVER_VERSION "1.24.0711" + +#define CHANNEL0_PORT_MASK 0x03 + +#define CHANNEL_NUM_MAX (16) +#define GRTNIC_PORTS_MAX (16) + +#define GRTNIC_DEFAULT_TXD (512) +#define GRTNIC_DEFAULT_TX_WORK (256) +#define GRTNIC_DEFAULT_RXD (512) + +#define GRTNIC_MAX_NUM_DESCRIPTORS 4096 +#define GRTNIC_MIN_NUM_DESCRIPTORS 64 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define GRTNIC_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define GRTNIC_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define GRTNIC_REQ_TX_BUFFER_GRANULARITY 1024 + + +#define MAX_Q_VECTORS 10 + +/* Transmit and receive queues */ +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 + +///////////////////////////////////////////////////////////////////////////// +#define XPHY_STATUS (0x0000) +#define MAC_ADRS_FILTER (0x0004) + +#define DESIGN_STATUS (0x0010) +#define IPXE_STATUS (0x0014) +#define TEMP_STATUS (0x0018) +#define SERIAL_NO (0x001C) + +//#define MAC_ADRS_FILTER (0x0004) //redefine in macphy.h XXGE_AFC_OFFSET +//#define MAC0_ADRS_LOW (0x0018) //redefine in macphy.h XXGE_MACADDR_OFFSET + +#define MAC_ADRS_ID (0x0020) +#define MAC_ADRS_LOW (0x0024) +#define MAC_ADRS_HIGH (0x0028) + +#define PHY_TX_DISABLE (0x0040) +#define MAC_LED_CTL (0x0044) +#define MAX_LED_PKT_NUM (0x0048) + +#define I2CCTL (0x0050) +#define ASIC_BOOT (0x0054) +#define FLASH_CMD (0x0058) + +#define ASIC_RX_FIFO_RST (0x0064) +#define ASIC_TX_FIFO_RST (0x0068) + +#define FC_WATERMARK (0x0070) +#define ETH_TX_PAUSE (0x0074) +#define CSUM_ENABLE (0x008C) + +#define MAC_HASH_TABLE_START (0x0200) +#define MAC_HASH_TABLE_WR (0x0204) +#define MAC_RX_OVERFLOW_FRAME (0x0210) + + +#define RSS_KEY_BEGIN (0x0300) +#define RSS_KEY_END (0x0324) + +#define RSS_RETA_BEGIN (0x0330) +#define RSS_RETA_END (0x03AC) + +#define FIRMWARE_CMD (0x040C) + +#define ETH_HIGH_MARK (96) +#define ETH_LOW_MARK (32) + +////////////////////////////////////////////////// +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ +#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 + + +/* Supported Rx Buffer Sizes */ +#define GRTNIC_RXBUFFER_256 256 /* Used for skb receive header */ +#define GRTNIC_RXBUFFER_1536 1536 +#define GRTNIC_RXBUFFER_2K 2048 +#define GRTNIC_RXBUFFER_3K 3072 +#define GRTNIC_RXBUFFER_4K 4096 +#ifdef CONFIG_DISABLE_PACKET_SPLIT +#define GRTNIC_RXBUFFER_7K 7168 +#define GRTNIC_RXBUFFER_8K 8192 +#define GRTNIC_RXBUFFER_15K 15360 +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +#define GRTNIC_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. 
In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define GRTNIC_MAX_2K_FRAME_BUILD_SKB (GRTNIC_RXBUFFER_1536 - NET_IP_ALIGN) +#define GRTNIC_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + GRTNIC_RXBUFFER_1536) > SKB_WITH_OVERHEAD(GRTNIC_RXBUFFER_2K)) + +static inline int grtnic_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int grtnic_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (GRTNIC_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = GRTNIC_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = GRTNIC_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return grtnic_compute_pad(rx_buf_len); +} + +#define GRTNIC_SKB_PAD grtnic_skb_pad() +#else //(PAGE_SIZE < 8192) +#define GRTNIC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif //!(PAGE_SIZE < 8192) + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define GRTNIC_RX_HDR_SIZE GRTNIC_RXBUFFER_256 + +#define GRTNIC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define GRTNIC_RX_DMA_ATTR NULL +#else +#define GRTNIC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + + + +#define MAX_EITR 0x00000FF8 +#define MIN_EITR 8 + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define GRTNIC_MIN_RSC_ITR 24 +#define GRTNIC_100K_ITR 40 +#define GRTNIC_20K_ITR 200 +#define GRTNIC_16K_ITR 248 +#define GRTNIC_12K_ITR 336 + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + + +/* Interrupt modes, as used by the IntMode parameter */ +#define GRTNIC_INT_MODE_MSIX 0 +#define GRTNIC_INT_MODE_MSI 1 +#define GRTNIC_INT_MODE_LEGACY 2 + + +/* obtain the 32 most significant (high) bits of a 32-bit or 64-bit address */ +#define PCI_DMA_H(addr) ((addr >> 16) >> 16) +/* obtain the 32 least significant (low) bits of a 32-bit or 64-bit address */ +#define PCI_DMA_L(addr) (addr & 0xffffffffUL) + +#define TX_INT_DELAY 32 +#define RX_INT_DELAY 32 + +#define GRTNIC_TXDCTL_DMA_BURST_ENABLE \ + (0x00000000 | /* set descriptor granularity */ \ + (1u << 25) | /* LWTHRESH */ \ + (8u << 16) | /* wthresh must be +1 more than desired */\ + (1u << 8) | /* hthresh */ \ + 0x20) /* pthresh */ + +#define GRTNIC_RXDCTL_DMA_BURST_ENABLE \ + (0x00000000 | /* set descriptor granularity */ \ + (1u << 25) | /* LWTHRESH */ \ + (8u << 16) | /* set writeback threshold */ \ + (4u << 8) | /* set Hrefetch threshold */ \ + 0x20) /* set Pthresh */ + +enum grt_gigeth_boards { + board_902E_GRT_FF, + board_902T_GRT_FF, + board_901ELR_GRT_FF, + board_1001E_GRT_FF, + board_1001E_QM_FF, + board_1002E_GRT_FF, + board_1005E_GRT_FX +}; + +struct grt_gigeth_info { + enum grt_gigeth_boards type; + 
int dma_channel_max; + unsigned char port_type; //0 for FIBER; 1 for COPPER + unsigned char port_speed; //0 for 1G; 1 for 10G +}; + +extern const struct grt_gigeth_info grt_902eff_info; +extern const struct grt_gigeth_info grt_902tff_info; +extern const struct grt_gigeth_info grt_901elr_info; +extern const struct grt_gigeth_info grt_1001eff_info; +extern const struct grt_gigeth_info qm_1001eff_info; +extern const struct grt_gigeth_info grt_1002eff_info; +extern const struct grt_gigeth_info grt_1005efx_info; + +/* Direct Cache Access (DCA) definitions */ +#define GRTNIC_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define GRTNIC_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define GRTNIC_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define GRTNIC_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define GRTNIC_DCA_RXCTRL_CPUID_MASK 0xFF000000 /* Rx CPUID Mask */ +#define GRTNIC_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID Shift */ +#define GRTNIC_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define GRTNIC_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define GRTNIC_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define GRTNIC_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define GRTNIC_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define GRTNIC_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define GRTNIC_DCA_TXCTRL_CPUID_MASK 0xFF000000 /* Tx CPUID Mask */ +#define GRTNIC_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID Shift */ +#define GRTNIC_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define GRTNIC_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define GRTNIC_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define GRTNIC_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +/* iterator for handling rings in ring container */ +#define grtnic_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define GRTNIC_TIDV_FPD BIT(31) +#define GRTNIC_RDTR_FPD BIT(31) + +#define GRTNIC_GET_DESC(R, i, type) (&(((union type *)((R).desc))[i])) +#define GRTNIC_TX_DESC(R, i) GRTNIC_GET_DESC(R, i, grtnic_tx_desc) +#define GRTNIC_RX_DESC(R, i) GRTNIC_GET_DESC(R, i, grtnic_rx_desc) + +#define GRTNIC_MAX_JUMBO_FRAME_SIZE 65536+18 + +#define GRTNIC_DEAD_READ_RETRIES 10 +#define GRTNIC_DEAD_READ_REG 0xdeadbeefU +#define GRTNIC_FAILED_READ_REG 0xffffffffU +#define GRTNIC_FAILED_READ_RETRIES 5 + + +//static inline void write_register(u32 value, void *iomem) +//{ +// iowrite32(value, iomem); +//} +// +//static inline u32 read_register(void *iomem) +//{ +// return ioread32(iomem); +//} + +static inline bool grtnic_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define GRTNIC_REMOVED(a) grtnic_removed(a) + +////////////////////////////////////////////////////////////////////////////// +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct grtnic_tx_buffer { + union grtnic_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct grtnic_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned int in_port; + u32 length; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 
pagecnt_bias; +#endif +}; + +struct grtnic_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct grtnic_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct grtnic_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +/* Statistics counters collected by the MAC */ +struct grtnic_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 scc; + u64 mcc; + u64 mpc; + u64 ecol; + u64 latecol; + u64 dc; + u64 rlec; + u64 rxpause; + u64 txpause; + u64 tx_underrun; + u64 badopcode; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 prcoversize; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 ruc; + u64 rfc; + u64 roc; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 ptcoversize; + u64 mptc; + u64 bptc; +}; + +enum grtnic_ring_state_t { +#ifndef CONFIG_DISABLE_PACKET_SPLIT + __GRTNIC_RX_3K_BUFFER, + __GRTNIC_RX_BUILD_SKB_ENABLED, +#endif + __GRTNIC_RX_RSC_ENABLED, + __GRTNIC_RX_CSUM_UDP_ZERO_ERR, +#if IS_ENABLED(CONFIG_FCOE) + __GRTNIC_RX_FCOE, +#endif + __GRTNIC_TX_FDIR_INIT_DONE, + __GRTNIC_TX_XPS_INIT_DONE, + __GRTNIC_TX_DETECT_HANG, + __GRTNIC_HANG_CHECK_ARMED, + __GRTNIC_TX_XDP_RING, +#ifdef HAVE_AF_XDP_ZC_SUPPORT + __GRTNIC_TX_DISABLED, +#endif +}; + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + +#define ring_uses_build_skb(ring) \ + test_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &(ring)->state) +#endif + +#define check_for_tx_hang(ring) \ + test_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) + +struct grtnic_ring { + struct grtnic_ring *next; /* pointer to next ring in q_vector */ + struct grtnic_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + + void *desc_wb; /* pointer to desc writeback memory */ + dma_addr_t desc_wb_dma;/* phys address of desc writeback memory */ + + struct sk_buff *skb; + + union { + struct grtnic_tx_buffer *tx_buffer_info; + struct grtnic_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + + unsigned int size; /* length of ring in bytes */ + u16 count; /* number of desc. 
in ring */ + + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + u16 next_to_use; + u16 next_to_clean; + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + u16 next_to_alloc; +#endif + +//#ifdef CONFIG_DISABLE_PACKET_SPLIT + u16 rx_buffer_len; +//#endif + + struct grtnic_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct grtnic_tx_queue_stats tx_stats; + struct grtnic_rx_queue_stats rx_stats; + }; + +} ____cacheline_internodealigned_in_smp; + + +#ifndef CONFIG_DISABLE_PACKET_SPLIT +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int grtnic_rx_bufsz(struct grtnic_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(GRTNIC_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + if (test_bit(__GRTNIC_RX_3K_BUFFER, &ring->state)) + return GRTNIC_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return GRTNIC_MAX_2K_FRAME_BUILD_SKB; +#endif + return GRTNIC_RXBUFFER_2K; +#endif +} + +static inline unsigned int grtnic_rx_pg_order(struct grtnic_ring __maybe_unused *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(__GRTNIC_RX_3K_BUFFER, &ring->state)) + return 1; +#endif + return 0; +} +#define grtnic_rx_pg_size(_ring) (PAGE_SIZE << grtnic_rx_pg_order(_ring)) + +#endif //CONFIG_DISABLE_PACKET_SPLIT + +#define ITR_ADAPTIVE_MIN_INC 2 +#define ITR_ADAPTIVE_MIN_USECS 10 +#define ITR_ADAPTIVE_MAX_USECS 84 +#define ITR_ADAPTIVE_LATENCY 0x80 +#define ITR_ADAPTIVE_BULK 0x00 +#define ITR_ADAPTIVE_MASK_USECS (ITR_ADAPTIVE_LATENCY - ITR_ADAPTIVE_MIN_INC) + +struct grtnic_ring_container { + struct grtnic_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
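+ *
+ * The trailing ring[0] member is a flexible array, so each q_vector is
+ * expected to be allocated together with its rings in a single block,
+ * e.g. (illustrative only):
+ *
+ *	q_vector = kzalloc(sizeof(struct grtnic_q_vector) +
+ *			   ring_count * sizeof(struct grtnic_ring),
+ *			   GFP_KERNEL);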
+ */ +struct grtnic_q_vector { + struct grtnic_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + + u32 eims_value; /* EIMS mask value */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct grtnic_ring_container rx, tx; + + struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif + int node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct grtnic_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + + +#ifdef HAVE_NDO_BUSY_POLL +enum grtnic_qv_state_t { + GRTNIC_QV_STATE_IDLE = 0, + GRTNIC_QV_STATE_NAPI, + GRTNIC_QV_STATE_POLL, + GRTNIC_QV_STATE_DISABLE +}; + +static inline void grtnic_qv_init_lock(struct grtnic_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool grtnic_qv_lock_napi(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != GRTNIC_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == GRTNIC_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void grtnic_qv_unlock_napi(struct grtnic_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != GRTNIC_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool grtnic_qv_lock_poll(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != GRTNIC_QV_STATE_IDLE) + q_vector->rx.ring->stats.yields++; +#endif + return rc == GRTNIC_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void grtnic_qv_unlock_poll(struct grtnic_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != GRTNIC_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool grtnic_qv_busy_polling(struct grtnic_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == GRTNIC_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool grtnic_qv_disable(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_DISABLE); + + return rc == GRTNIC_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ + +enum grtnic_state_t { + __GRTNIC_TESTING, + __GRTNIC_RESETTING, + __GRTNIC_DOWN, + __GRTNIC_DISABLED, + __GRTNIC_REMOVING, + __GRTNIC_SERVICE_SCHED, + __GRTNIC_SERVICE_INITED, + __GRTNIC_IN_SFP_INIT, +#ifdef HAVE_PTP_1588_CLOCK + __GRTNIC_PTP_RUNNING, + __GRTNIC_PTP_TX_IN_PROGRESS, +#endif + __GRTNIC_RESET_REQUESTED, +}; + +struct grtnic_cb { +#ifdef 
CONFIG_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + dma_addr_t dma; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_DISABLE_PACKET_SPLIT + bool page_released; +#endif +}; +#define GRTNIC_CB(skb) ((struct grtnic_cb *)(skb)->cb) + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +struct grtnic_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + + +union grtnic_tx_desc { + struct { + __le64 src_addr; /* Address of descriptor's data buf */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + struct + { + u32 csum_info:16; + u32 reserved:12; + u32 port:4; + } tx_info; /*user data */ + } read; + + struct { + __le64 rsvd0; /* Reserved */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + __le32 rsvd1; + } wb; +}; + + +union grtnic_rx_desc { + struct { + __le64 src_addr; /* Packet buffer address */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + __le32 rsvd; + } read; + + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + + struct + { + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + + struct + { + u32 csum_ok:1; + u32 ipcs:1; + u32 tcpcs:1; + u32 udpcs:1; + u32 udp_csum_flag:1; + u32 reserved:27; + } rx_info; + } upper; + } wb; /* writeback */ +}; + +///////////////////////////////////////////////////////////////////////////////////// +#define GRTNIC_MAX_TXD_PWR 13 +#define GRTNIC_MAX_DATA_PER_TXD (1 << GRTNIC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), GRTNIC_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + + +//struct grtnic_buffer { +// dma_addr_t dma; +// struct sk_buff *skb; +// unsigned int in_port; +// unsigned long time_stamp; +// u32 length; +// u16 next_to_watch; +// unsigned int segs; +// unsigned int bytecount; +// u16 mapped_as_page; +//#ifndef CONFIG_DISABLE_PACKET_SPLIT +// struct page *page; +//#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) +// __u32 page_offset; +//#else +// __u16 page_offset; +//#endif +// __u16 pagecnt_bias; +//#endif +//}; + +struct grtnic_desc_wb { + u32 desc_hw_ptr; +} __packed; + +//////////////////////////////////////////////////////////////////////////////////// +enum fc_mode { + fc_none = 0, + fc_rx_pause, + fc_tx_pause, + fc_full, + fc_default = 0xFF +}; + +struct fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + 
bool fc_autoneg; + enum fc_mode current_mode; /* FC mode in effect */ + enum fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct grtnic_mac_info { + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u32 mc_filter_type; + u16 mta_reg_count; + + struct fc_info fc; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; +}; + +struct grtnic_hw { + // BAR pointers + void * __iomem dma_bar; + void * __iomem user_bar; + resource_size_t dma_bar_len; + resource_size_t user_bar_len; + void *back; + struct grtnic_mac_info mac; + bool adapter_stopped; + u32 phy_addr; +}; + +/* default to trying for four seconds */ +#define GRTNIC_TRY_LINK_TIMEOUT (4 * HZ) + +struct grtnic_adapter { + struct device *dev; + struct pci_dev *pdev; + struct net_device *netdev; + + int func; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + /* TX */ + struct grtnic_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + u64 tx_busy; + + /* RX */ + struct grtnic_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 non_eop_descs; + u32 alloc_rx_page; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + const struct grt_gigeth_info *ei; + + int rss_queues; + int num_q_vectors; + + u8 ivar[MAX_Q_VECTORS]; + struct grtnic_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + + unsigned int id; + + struct proc_dir_entry *proc_dir; //for test + u32 tx_count0; + u32 tx_count1; + u32 rx_count; + + struct msix_entry *msix_entries; + int int_mode; + +#ifdef ETHTOOL_TEST + u32 test_icr; + struct grtnic_ring test_tx_ring; + struct grtnic_ring test_rx_ring; +#endif + + struct grtnic_hw hw; + u16 msg_enable; + + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + + u32 max_frame_size; + u32 min_frame_size; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct grtnic_hw_stats stats; + +//#ifdef ETHTOOL_GRXFHINDIR +// u32 rss_indir_tbl_init; +// u8 rss_indir_tbl[RETA_SIZE]; +//#endif + +#define GRTNIC_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[GRTNIC_MAX_RETA_ENTRIES]; + +#define GRTNIC_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 *rss_key; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
+ */ + u32 flags; +#define GRTNIC_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define GRTNIC_FLAG_MSI_ENABLED (u32)(1 << 1) +#define GRTNIC_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define GRTNIC_FLAG_MSIX_ENABLED (u32)(1 << 3) + +#define GRTNIC_FLAG_TXCSUM_CAPABLE (u32)(1 << 4) +#define GRTNIC_FLAG_RXCSUM_CAPABLE (u32)(1 << 5) + +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define GRTNIC_FLAG_DCA_ENABLED (u32)(1 << 6) +#define GRTNIC_FLAG_DCA_CAPABLE (u32)(1 << 7) +#define GRTNIC_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) +#else +#define GRTNIC_FLAG_DCA_ENABLED (u32)0 +#define GRTNIC_FLAG_DCA_CAPABLE (u32)0 +#define GRTNIC_FLAG_DCA_ENABLED_DATA (u32)0 +#endif +#define GRTNIC_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define GRTNIC_FLAG_DCB_ENABLED (u32)(1 << 10) +#define GRTNIC_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define GRTNIC_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define GRTNIC_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define GRTNIC_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define GRTNIC_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define GRTNIC_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#if IS_ENABLED(CONFIG_FCOE) +#define GRTNIC_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define GRTNIC_FLAG_FCOE_ENABLED (u32)(1 << 18) +#endif /* CONFIG_FCOE */ +#define GRTNIC_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define GRTNIC_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define GRTNIC_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define GRTNIC_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define GRTNIC_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define GRTNIC_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define GRTNIC_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define GRTNIC_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define GRTNIC_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define GRTNIC_FLAG_MDD_ENABLED (u32)(1 << 29) +#define GRTNIC_FLAG_DCB_CAPABLE (u32)(1 << 30) +#define GRTNIC_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) + +// struct grtnic_mac_info mac; + int type; + int speed; + + u16 bd_number; + bool netdev_registered; +}; + +/* Error Codes */ +#define GRTNIC_SUCCESS 0 +#define GRTNIC_ERR_OUT_OF_MEM -34 + + +//////////////////////////////////////////////////////////////// +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, \ + "%s: " fmt, __func__, ## args)) : NULL) + +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +static inline void GRTNIC_WRITE_REG(struct grtnic_hw *hw, u32 reg, u32 value, u8 bar) +{ + u8 __iomem *reg_addr; + + reg_addr = bar ? 
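/*
 * Illustrative note (not part of the original patch): the 'bar' argument
 * selects which mapping the register lives in -- non-zero addresses
 * hw->dma_bar (DMA/interrupt block), zero addresses hw->user_bar (MAC/PHY and
 * user registers).  Callers later in this patch follow that convention, e.g.
 *
 *   GRTNIC_WRITE_REG(hw, MAC_LED_CTL, led_cmd, 0);                          user BAR
 *   GRTNIC_WRITE_REG(hw, ((TARGET_IRQ << 12) + ADDR_INTR_IMS * 4), mask, 1);  DMA BAR
 *
 * and use GRTNIC_WRITE_FLUSH(), defined just below as a read of XPHY_STATUS,
 * to flush posted MMIO writes when ordering matters.
 */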
hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return; + writel(value, reg_addr + reg); +} + +#define GRTNIC_READ_REG(h, r, b) grtnic_read_reg(h, r, b) //hw, reg, bar +#define GRTNIC_WRITE_FLUSH(a) GRTNIC_READ_REG(a, XPHY_STATUS, 0) + + + +//#ifdef CONFIG_BQL +static inline struct netdev_queue *txring_txq(const struct grtnic_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} +//#endif /* CONFIG_BQL */ + +u32 grtnic_read_reg(struct grtnic_hw *hw, u32 reg, u8 bar); + +#ifdef GRTNIC_PROCFS +void grtnic_procfs_exit(struct grtnic_adapter *adapter); +int grtnic_procfs_init(struct grtnic_adapter *adapter); +int grtnic_procfs_topdir_init(void); +void grtnic_procfs_topdir_exit(void); +#endif /* GRTNIC_PROCFS */ + + //main.c +void grtnic_write_itr (struct grtnic_q_vector *q_vector); +void grtnic_update_stats(struct grtnic_adapter *adapter); +void grtnic_down(struct grtnic_adapter *adapter); +void grtnic_assign_netdev_ops(struct net_device *netdev); +irqreturn_t grtnic_msix_other(int __always_unused irq, void *data); +irqreturn_t grtnic_msix_ring(int __always_unused irq, void *data); +irqreturn_t grtnic_isr (int __always_unused irq, void *data); +int grtnic_poll(struct napi_struct *napi, int budget); +void grtnic_close_suspend(struct grtnic_adapter *adapter); + +void grtnic_check_options(struct grtnic_adapter *adapter); //in param.c + + //netdev.c +void grtnic_setup_mrqc(struct grtnic_adapter *adapter); +void grtnic_configure_msix(struct grtnic_adapter *adapter); +void grtnic_configure_msi_and_legacy(struct grtnic_adapter *adapter); +int grtnic_request_irq(struct grtnic_adapter *adapter); +void grtnic_irq_enable(struct grtnic_adapter *adapter); +void grtnic_irq_disable(struct grtnic_adapter *adapter); +void grtnic_free_irq(struct grtnic_adapter *adapter); +void grtnic_napi_enable_all(struct grtnic_adapter *adapter); +void grtnic_napi_disable_all(struct grtnic_adapter *adapter); +void grtnic_service_event_schedule(struct grtnic_adapter *adapter); +void grtnic_set_ethtool_ops(struct net_device *netdev); + +//#ifdef ETHTOOL_OPS_COMPAT +// int ethtool_ioctl(struct ifreq *ifr); +//#endif + + //ethtool.c +void grtnic_disable_rx_queue(struct grtnic_adapter *adapter); +void grtnic_disable_tx_queue(struct grtnic_adapter *adapter); +void grtnic_reset(struct grtnic_adapter *adapter); +void grtnic_do_reset(struct net_device *netdev); + +void grtnic_configure_tx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring); +void grtnic_configure_rx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring); +void grtnic_alloc_rx_buffers(struct grtnic_ring *rx_ring, u16 cleaned_count); +netdev_tx_t grtnic_xmit_frame_ring (struct sk_buff *skb, struct grtnic_adapter __maybe_unused *adapter, struct grtnic_ring *tx_ring); + +int grtnic_close(struct net_device *netdev); +int grtnic_open(struct net_device *netdev); + +int grtnic_setup_tx_resources(struct grtnic_ring *tx_ring); +int grtnic_setup_rx_resources(struct grtnic_ring *rx_ring); +void grtnic_free_tx_resources(struct grtnic_ring *tx_ring); +void grtnic_free_rx_resources(struct grtnic_ring *rx_ring); +void grtnic_up(struct grtnic_adapter *adapter); +void grtnic_store_reta(struct grtnic_adapter *adapter); + +u32 grtnic_rss_indir_tbl_entries(struct grtnic_adapter *adapter); +void grtnic_store_key(struct grtnic_adapter *adapter); + +#endif /* GRTNIC_CORE_H */ diff --git a/drivers/net/ethernet/guangruntong/grtnic_ethtool.c b/drivers/net/ethernet/guangruntong/grtnic_ethtool.c new file mode 100755 index 
00000000000000..db03efa7ad2fbd --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_ethtool.c @@ -0,0 +1,2262 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include + +#include "grtnic.h" +#include "grtnic_nvm.h" +#include "grtnic_macphy.h" + +#define be32(x) ((x<<24 & 0xff000000) | (x<<8 & 0x00ff0000) | (x>>8 & 0x0000ff00) | (x>>24 & 0x000000ff)) +#define be16(x) ((x<<8 & 0xff00) | (x>>8 & 0x00ff)) + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + + +#ifdef ETHTOOL_GSTATS +struct grtnic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define GRTNIC_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct grtnic_stats grtnic_gstrings_net_stats[] = { + GRTNIC_NETDEV_STAT(rx_errors), + GRTNIC_NETDEV_STAT(tx_errors), + GRTNIC_NETDEV_STAT(tx_dropped), + GRTNIC_NETDEV_STAT(rx_length_errors), + GRTNIC_NETDEV_STAT(rx_over_errors), + GRTNIC_NETDEV_STAT(rx_frame_errors), + GRTNIC_NETDEV_STAT(rx_fifo_errors), + GRTNIC_NETDEV_STAT(tx_fifo_errors), + GRTNIC_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define GRTNIC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct grtnic_adapter, _stat), \ + .stat_offset = offsetof(struct grtnic_adapter, _stat) \ +} + +static const struct grtnic_stats grtnic_gstrings_stats[] = { + GRTNIC_STAT("rx_packets", stats.gprc), + GRTNIC_STAT("tx_packets", stats.gptc), + GRTNIC_STAT("rx_bytes", stats.gorc), + GRTNIC_STAT("tx_bytes", stats.gotc), + + GRTNIC_STAT("lsc_int", lsc_int), + GRTNIC_STAT("tx_busy", tx_busy), + GRTNIC_STAT("non_eop_descs", non_eop_descs), +// GRTNIC_STAT("tx_timeout_count", tx_timeout_count), + GRTNIC_STAT("tx_restart_queue", restart_queue), + GRTNIC_STAT("rx_csum_offload_errors", hw_csum_rx_error), + GRTNIC_STAT("alloc_rx_page", alloc_rx_page), + GRTNIC_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + GRTNIC_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + + GRTNIC_STAT("rx_broadcast", stats.bprc), + GRTNIC_STAT("tx_broadcast", stats.bptc), + GRTNIC_STAT("rx_multicast", stats.mprc), + GRTNIC_STAT("tx_multicast", stats.mptc), + GRTNIC_STAT("multicast", stats.mprc), + GRTNIC_STAT("rx_pause", stats.rxpause), + GRTNIC_STAT("tx_pause", stats.txpause), + GRTNIC_STAT("tx_underrun", stats.tx_underrun), + GRTNIC_STAT("rx_crc_errors", stats.crcerrs), + GRTNIC_STAT("rx_missed_errors", stats.mpc), + GRTNIC_STAT("tx_aborted_errors", stats.ecol), + GRTNIC_STAT("tx_window_errors", stats.latecol), + GRTNIC_STAT("tx_abort_late_coll", stats.latecol), + GRTNIC_STAT("tx_deferred_ok", stats.dc), + GRTNIC_STAT("tx_single_coll_ok", stats.scc), + GRTNIC_STAT("tx_multi_coll_ok", stats.mcc), + GRTNIC_STAT("rx_long_length_errors", stats.roc), + GRTNIC_STAT("rx_short_length_errors", stats.ruc), + GRTNIC_STAT("rx_align_errors", stats.algnerrc), + GRTNIC_STAT("rx_long_byte_count", stats.gorc) +}; + +/* grtnic allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
+ */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define GRTNIC_NUM_RX_QUEUES netdev->num_tx_queues +#define GRTNIC_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define GRTNIC_NUM_RX_QUEUES adapter->num_tx_queues +#define GRTNIC_NUM_TX_QUEUES adapter->num_tx_queues +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define GRTNIC_NUM_TX_QUEUES 1 +#define GRTNIC_NUM_RX_QUEUES ( \ + ((struct grtnic_adapter *)netdev_priv(netdev))->num_rx_queues) +#endif /* HAVE_TX_MQ */ + +#define GRTNIC_QUEUE_STATS_LEN ( \ + (GRTNIC_NUM_TX_QUEUES + GRTNIC_NUM_RX_QUEUES) * \ + (sizeof(struct grtnic_queue_stats) / sizeof(u64))) + +#define GRTNIC_GLOBAL_STATS_LEN ARRAY_SIZE(grtnic_gstrings_stats) +#define GRTNIC_NETDEV_STATS_LEN ARRAY_SIZE(grtnic_gstrings_net_stats) +#define GRTNIC_STATS_LEN \ + (GRTNIC_GLOBAL_STATS_LEN + GRTNIC_NETDEV_STATS_LEN + GRTNIC_QUEUE_STATS_LEN) + +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char grtnic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define GRTNIC_TEST_LEN (sizeof(grtnic_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + + +#ifdef ETHTOOL_GLINKSETTINGS +static int grtnic_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int max_port_speed = adapter->speed; + int port_speed = adapter->link_speed; + u32 fiber_speed = SPEED_1000; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + if(adapter->type==1) //copper + { + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + + cmd->base.port = PORT_TP; + } + + else //fiber + { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + else + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + + cmd->base.port = PORT_FIBRE; + fiber_speed = 
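/*
 * Worked example for the string/stat bookkeeping earlier in this file
 * (illustrative; assumes struct grtnic_queue_stats carries the two u64
 * counters, packets and bytes, that grtnic_get_ethtool_stats() copies out
 * below): with 8 Tx and 8 Rx queues GRTNIC_QUEUE_STATS_LEN = (8 + 8) * 2 =
 * 32, so GRTNIC_STATS_LEN = GRTNIC_GLOBAL_STATS_LEN +
 * GRTNIC_NETDEV_STATS_LEN + 32, and the ETH_SS_STATS strings must be emitted
 * in exactly that order for "ethtool -S" output to line up with the values.
 */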
max_port_speed ? SPEED_10000 : SPEED_1000; + } + + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + cmd->base.speed = (adapter->type==0) ? fiber_speed : (port_speed==0x03) ? SPEED_10000 : (port_speed==0x02) ? SPEED_1000 : (port_speed==0x01) ? SPEED_100 : SPEED_10; + cmd->base.duplex = DUPLEX_FULL; + } + } + + cmd->base.phy_address = adapter->func; + cmd->base.autoneg = AUTONEG_ENABLE; + + return 0; +} + +static int grtnic_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) +{ + return 0; +} + + +#else /* !ETHTOOL_GLINKSETTINGS */ +static int grtnic_nic_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int max_port_speed = adapter->speed; + int port_speed = adapter->link_speed; + u32 fiber_speed = SPEED_1000; + + if(adapter->type==1) //copper + { + ecmd->supported = ( SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + (max_port_speed ? SUPPORTED_10000baseT_Full : 0)| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + + ecmd->advertising = ecmd->supported | ADVERTISED_TP | ADVERTISED_Autoneg | ADVERTISED_Pause; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + } + + else //fiber + { + ecmd->supported = (max_port_speed ? SUPPORTED_10000baseT_Full : SUPPORTED_1000baseT_Full) | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause; + ecmd->advertising = ecmd->supported | ADVERTISED_FIBRE | ADVERTISED_Autoneg | ADVERTISED_Pause; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + fiber_speed = max_port_speed ? SPEED_10000 : SPEED_1000; + } + + ecmd->speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + ecmd->speed = (adapter->type==0) ? fiber_speed : (port_speed==0x03) ? SPEED_10000 : (port_speed==0x02) ? SPEED_1000 : (port_speed==0x01) ? 
SPEED_100 : SPEED_10; + ecmd->duplex = DUPLEX_FULL; + } + } + + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->phy_address = adapter->func; + + return 0; +} + +static int grtnic_nic_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ +// struct grtnic_port *grtnic_port = netdev_priv(netdev); +// struct grtnic_adapter *adapter = grtnic_port->adapter; +// u32 phy_mode_control_val32 = 0; + + //printk("netdev_no=%d, autoneg=%d, speed=%d duplex=%d\n", port_adapter->netdev_no, ecmd->autoneg, ecmd->speed, ecmd->duplex); + + return 0; +#if 0 + phy_mode_control_val32 = phy_read(adapter, port_adapter->phyid, PHY_MODE_CONTRL_REG); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + phy_mode_control_val32 |= BIT(12); + phy_mode_control_val32 |= BIT(6); /* forced speed selection bit 6,13 */ + } else { + phy_mode_control_val32 &= ~BIT(12); + + if (ecmd->speed == SPEED_1000) { /* 10 */ + phy_mode_control_val32 |= BIT(6); + phy_mode_control_val32 &= ~BIT(13); + } else if (ecmd->speed == SPEED_100 && port_adapter->support_100M) { /* 01 */ + phy_mode_control_val32 &= ~BIT(6); + phy_mode_control_val32 |= BIT(13); + } else + return -EINVAL; + + if (ecmd->duplex == DUPLEX_FULL) + phy_mode_control_val32 |= BIT(8); /* full duplex bit 8 */ + else + phy_mode_control_val32 &= ~BIT(8); + } + + port_adapter->phy_mode_control_val = phy_mode_control_val32; + + chip_rx_disable(adapter); + phy_write(adapter, port_adapter->phyid, PHY_MODE_CONTRL_REG, phy_mode_control_val32); + chip_rx_enable(adapter); + + return 0; +#endif +} + +#endif //* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +////////////////////////////////////////////////////////////////////////////////////////// + +static void grtnic_nic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + char firmware_version[32]; + char ipxe_version[32]; + u32 hw_version[2]; + u8 offset = adapter->speed ? 
1 : 0; +// u32 sn_h[2]; +// u8 sn_h_l; +// char sn_s[64]; +// u32 chip_temp; + + strncpy(drvinfo->driver, DRIVER_NAME, 32); + strncpy(drvinfo->version, DRIVER_VERSION, 32); + +// chip_temp = read_register(adapter->user_bar + TEMP_STATUS); +// printk("temp = %d\n", (chip_temp*504)/4096-273); +// +// sn_h[0] = read_register(adapter->user_bar + SERIAL_NO); +// sn_h[1] = read_register(adapter->user_bar + SERIAL_NO); +// sn_h_l = read_register(adapter->user_bar + SERIAL_NO) & 0xff; +// sprintf(sn_s, "%08x%08x%02x", sn_h[0],sn_h[1],sn_h_l); +// printk("sn = %s\n", sn_s); + + hw_version[0] = GRTNIC_READ_REG(hw, DESIGN_STATUS, 0); + hw_version[1] = GRTNIC_READ_REG(hw, IPXE_STATUS, 0); + + if(hw_version[0] < 0x200) //maybe old firmware //0x101 + { + read_flash_buffer(adapter, (VPD_OFFSET - (offset * 0x100000)) + VERSION_OFFSET, 2, (u32 *)&hw_version); + sprintf(firmware_version, "%08d", hw_version[0] ^ 0xFFFFFFFF); + sprintf(ipxe_version, "%08d", hw_version[1] ^ 0xFFFFFFFF); + } + else + { + sprintf(firmware_version, "%08x", hw_version[0]); + sprintf(ipxe_version, "%08x", hw_version[1]); + } + + strncpy(drvinfo->fw_version, firmware_version, 32); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) + strncpy(drvinfo->erom_version, ipxe_version, 32); +#endif + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = GRTNIC_STATS_LEN; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static void grtnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + pause->autoneg = (mac->fc.fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (mac->fc.current_mode == fc_rx_pause) { + pause->rx_pause = 1; + } else if (mac->fc.current_mode == fc_tx_pause) { + pause->tx_pause = 1; + } else if (mac->fc.current_mode == fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static void +grtnic_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +static void grtnic_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = GRTNIC_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = GRTNIC_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static int +grtnic_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +static int grtnic_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (ring->tx_pending > GRTNIC_MAX_NUM_DESCRIPTORS || + ring->tx_pending < GRTNIC_MIN_NUM_DESCRIPTORS || + ring->rx_pending > 
GRTNIC_MAX_NUM_DESCRIPTORS || + ring->rx_pending < GRTNIC_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + GRTNIC_MIN_NUM_DESCRIPTORS, + GRTNIC_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, + GRTNIC_REQ_TX_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, + GRTNIC_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__GRTNIC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct grtnic_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + grtnic_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct grtnic_ring)); + + temp_ring[i].count = new_tx_count; + err = grtnic_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + grtnic_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + grtnic_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct grtnic_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct grtnic_ring)); + + temp_ring[i].count = new_rx_count; + err = grtnic_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + grtnic_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + grtnic_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct grtnic_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + grtnic_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__GRTNIC_RESETTING, &adapter->state); + return err; +} + + + + +static int grtnic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + u8 flowctl = 0; + int retval = 0; + + mac->fc.fc_autoneg = pause->autoneg; + + if (mac->fc.fc_autoneg == AUTONEG_ENABLE) { + mac->fc.requested_mode = fc_full; + } else { + if (pause->rx_pause && pause->tx_pause) + mac->fc.requested_mode = fc_full; + else if (pause->rx_pause && !pause->tx_pause) + mac->fc.requested_mode = fc_rx_pause; + else if 
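/*
 * Worked example of the ring-count rounding above (illustrative; the actual
 * value of GRTNIC_REQ_TX_DESCRIPTOR_MULTIPLE is defined elsewhere in this
 * patch): if the multiple is 8, "ethtool -G ... tx 1000" keeps 1000
 * descriptors while a request of 1001 is rounded up to ALIGN(1001, 8) = 1008.
 * Only when the rounded counts differ from the current ring sizes does the
 * function take the adapter down and swap in freshly allocated rings via the
 * temp_ring copies.
 */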
(!pause->rx_pause && pause->tx_pause) + mac->fc.requested_mode = fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + mac->fc.requested_mode = fc_none; + + mac->fc.current_mode = mac->fc.requested_mode; + + } + + if(mac->fc.requested_mode == fc_full) flowctl = 3; + else if(mac->fc.requested_mode == fc_tx_pause) flowctl = 2; + else if(mac->fc.requested_mode == fc_rx_pause) flowctl = 1; + else flowctl = 0; + + if(pause->tx_pause) + grtnic_set_fc_watermarks(netdev); + + GRTNIC_WRITE_REG(hw, ETH_TX_PAUSE, pause->tx_pause, 0); + + grtnic_SetPause(netdev, flowctl); + + return retval; +} + + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int grtnic_get_stats_count(struct net_device *netdev) +{ + return GRTNIC_STATS_LEN; +} + +static int grtnic_diag_test_count(struct net_device *netdev) +{ + return GRTNIC_TEST_LEN; +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +static int grtnic_get_sset_count(struct net_device *netdev, int sset) +{ +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + struct grtnic_adapter *adapter = netdev_priv(netdev); +#endif +#endif + + switch (sset) { + case ETH_SS_STATS: + return GRTNIC_STATS_LEN; + case ETH_SS_TEST: + return GRTNIC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: +// return IXGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +static void grtnic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_NDO_GET_STATS64 + const struct rtnl_link_stats64 *net_stats; + struct rtnl_link_stats64 temp; + unsigned int start; +#else +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif +#endif + struct grtnic_ring *ring; + int i, j; + char *p; + + grtnic_update_stats(adapter); +#ifdef HAVE_NDO_GET_STATS64 + net_stats = dev_get_stats(netdev, &temp); +#endif + + for (i = 0; i < GRTNIC_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + grtnic_gstrings_net_stats[i].stat_offset; + data[i] = (grtnic_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < GRTNIC_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + grtnic_gstrings_stats[j].stat_offset; + data[i] = (grtnic_gstrings_stats[j].sizeof_stat == sizeof(u64)) ? 
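/*
 * Illustrative note on the stats copy loop here: each table entry records
 * only an offset and a size, so a counter is fetched with plain pointer
 * arithmetic.  For example, GRTNIC_STAT("tx_busy", tx_busy) expands to
 * { "tx_busy", sizeof_field(struct grtnic_adapter, tx_busy),
 *   offsetof(struct grtnic_adapter, tx_busy) }, and the loop reads it back as
 * *(u64 *)((char *)adapter + offsetof(struct grtnic_adapter, tx_busy)).
 */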
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < GRTNIC_NUM_TX_QUEUES; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < GRTNIC_NUM_RX_QUEUES; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } +} + +static void grtnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *grtnic_gstrings_test, + GRTNIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < GRTNIC_NETDEV_STATS_LEN; i++) { + memcpy(p, grtnic_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < GRTNIC_GLOBAL_STATS_LEN; i++) { + memcpy(p, grtnic_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < GRTNIC_NUM_TX_QUEUES; i++) { + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < GRTNIC_NUM_RX_QUEUES; i++) { + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + case ETH_SS_PRIV_FLAGS: +// memcpy(data, ixgbe_priv_flags_strings, +// IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + } +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int grtnic_nic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u8 led_cmd, led_on; + + led_cmd = 1<<5; + led_on = 1<<4; + + switch 
(state) { + case ETHTOOL_ID_ACTIVE: +// grtnic_port->led_reg = read_register(adapter->user_bar + MAC_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, (led_cmd|led_on), 0); //led_start+led_on + break; + + case ETHTOOL_ID_OFF: + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, led_cmd, 0); //led_start + led_off + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, 0, 0); //led_stop and led_off + break; + } + + return 0; +} +#else +static int grtnic_nic_phys_id(struct net_device *netdev, u32 data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u32 i; + u8 led_cmd, led_on; + + led_cmd = 1<<5; + led_on = 1<<4; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, (led_cmd|led_on), 0); //led_start+led_on + msleep_interruptible(200); + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, led_cmd, 0); //led_start + led_off + msleep_interruptible(200); + } + + /* Restore LED settings */ + + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, 0, 0); //led_stop and led_off + + return 0; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + +int firmware_is_old(struct grtnic_adapter *adapter) +{ + int old_firmware; + struct grtnic_hw *hw = &adapter->hw; + + GRTNIC_WRITE_REG(hw, FIRMWARE_CMD, 1, 0); + old_firmware = !GRTNIC_READ_REG(hw, FIRMWARE_CMD, 0); + GRTNIC_WRITE_REG(hw, FIRMWARE_CMD, 0, 0); + return old_firmware; +} + +static int grtnic_flash_device(struct net_device *netdev, struct ethtool_flash *flash) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + int i = 0; + + u16 temp = 0; + u16 vid = 0; + u16 pid = 0; + int image_type = 0; + + char version_s[64]; + u32 version = 0; + long version_h; + long result = 0; + + int pxe_size = 0; + char ipxe_ver_s[9] = {0}; + + const struct firmware *fw; + const char *filename = flash->data; + int rc = 0; + + u32 offset = 0; + u32 copied = 0; + + u32 read_filesize; + u32 once_size; + + int firmware_offset; + + int cycle, remainder, schedule; + u32 offset_int; + u32 *segment; + + int old_firmware = firmware_is_old(adapter); + + rc = request_firmware(&fw, filename, &netdev->dev); + if (rc != 0) { + netdev_err(netdev, "PKG error %d requesting file: %s\n", rc, filename); + printk("You MUST copy image file to /lib/firmware directory!!!"); + return rc; + } + + read_filesize = fw->size; + cycle = read_filesize / FLASH_SECTOR_SIZE; + remainder = read_filesize % FLASH_SECTOR_SIZE; + cycle = cycle + (remainder ? 1 : 0); + + + firmware_offset = adapter->speed; //10G,link_speed = 1 + temp = *(u16 *) (fw->data); + + if(temp==0xAA55) //maybe pxe image + { + image_type = 1; + vid = *(u16 *) (fw->data+ 0x20); + pid = *(u16 *) (fw->data+ 0x22); + + pxe_size = read_filesize - 8; //last 8 is ver + memcpy(ipxe_ver_s, (fw->data + pxe_size), 8); +// version = strtol(ipxe_ver_s,NULL,0); + + if(kstrtol(ipxe_ver_s,0,&result)) + result = 0; + version = result^0xFFFFFFFF; + + offset = PXE_OFFSET - (firmware_offset * 0x100000); + } + + else if( (temp&0xff)==0x82 && read_filesize==0x100) //maybe vpd image + { + image_type = 2; + offset = VPD_OFFSET - (firmware_offset * 0x100000); + } + + else //firmware + { + version = *(u32 *)(fw->data); + pid = *(u16 *) (fw->data+ 0x04); //exchange vid & pid pos + vid = *(u16 *) (fw->data+ 0x06); + vid ^=0xFFFF; + pid ^=0xFFFF; + + offset = old_firmware ? 
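/*
 * Illustrative summary of the image-type probe above: a file whose first
 * 16-bit word is 0xAA55 is treated as a PXE option ROM (VID/PID live at
 * offsets 0x20/0x22 and the last 8 bytes carry an ASCII version string); a
 * file whose first byte is 0x82 and whose length is exactly 0x100 is treated
 * as a VPD image; anything else is treated as firmware, with the 32-bit
 * version at offset 0 and the PID/VID at offsets 0x04/0x06 stored
 * XOR-inverted (^ 0xFFFF).  The flash offset is then chosen per image type
 * and per link speed (firmware_offset).
 */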
0 : 0x200000; + } + + if(image_type!=2) //vpd no vid&pid + { + if(vid != adapter->pdev->vendor || pid != adapter->pdev->device) + { + printk("Wrong image!\n\n"); + return 0; + } + } + + printk("Found %s image File!!! ", (image_type==1) ? "pxe" : (image_type==2) ? "vpd" : "firmware"); + if(image_type==0 || image_type==1) + printk("and version = %08d", version^0xFFFFFFFF); + + printk("\n\n"); + + + if(image_type==2) //vpd image no needed vpd & pid + { + segment = vmalloc(FLASH_SUBSECTOR_SIZE); + memset(segment, 0x00, FLASH_SUBSECTOR_SIZE); + + read_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, segment); + erase_subsector_flash(adapter, offset); + + memcpy(segment, fw->data, read_filesize); + + write_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, segment); + vfree(segment); + } + + else //firmware or pxe image + { + GRTNIC_WRITE_REG(&adapter->hw, FIRMWARE_CMD, 1, 0); + + while (read_filesize>0) + { + erase_sector_flash(adapter, offset); + + if(read_filesize >= FLASH_SECTOR_SIZE) + once_size = FLASH_SECTOR_SIZE; + else + once_size = read_filesize; + + if(once_size & 0x04) + write_flash_buffer(adapter, offset, (once_size>>2)+1, (u32 *)(fw->data + copied)); + else + write_flash_buffer(adapter, offset, once_size>>2, (u32 *)(fw->data + copied)); + + schedule = (i+1)*100 / cycle; + + if(i< cycle-1) + printk("\rUpgrading--->[%d%%]",schedule); + else + printk("\rUpgrading--->[%s]\n\n","Done"); + + touch_softlockup_watchdog(); + + read_filesize = read_filesize - once_size; + offset += once_size; + copied += once_size; + i++; + } + + GRTNIC_WRITE_REG(&adapter->hw, FIRMWARE_CMD, 0, 0); + + //next write version to flash + offset_int = VPD_OFFSET - (firmware_offset * 0x100000); + segment = vmalloc(FLASH_SUBSECTOR_SIZE); + memset(segment, 0x00, FLASH_SUBSECTOR_SIZE); + + read_flash_buffer(adapter, offset_int, FLASH_SUBSECTOR_SIZE>>2, segment); + erase_subsector_flash(adapter, offset_int); + + sprintf(version_s, "%08d", version ^ 0xFFFFFFFF); + if(kstrtol(version_s,16,&version_h)) + version_h = 0; + //save 16jinzhi version for easy asic get version cmd + + segment[(VERSION_OFFSET>>2) + image_type] = version; + segment[(VERSION_OFFSET>>2) + 4 + image_type] = version_h ^ 0xFFFFFFFF; + + write_flash_buffer(adapter, offset_int, FLASH_SUBSECTOR_SIZE>>2, segment); + vfree(segment); + } + + release_firmware(fw); + + printk("firmware Update Complete\n"); +// printk("Triggering IPROG to reload ASIC...\n"); +// write_register(0xFEE1DEAD, adapter->user_bar + 0x0054); + printk("YOU MUST REBOOT COMPUTER TO LET NEW FIRMWARE BEGIN WORKS!\n"); + return rc; +} + +#ifdef ETHTOOL_GRXRINGS + +static int grtnic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; +// case ETHTOOL_GRXCLSRLCNT: +// cmd->rule_cnt = adapter->fdir_filter_count; +// ret = 0; +// break; +// case ETHTOOL_GRXCLSRULE: +// ret = grtnic_get_ethtool_fdir_entry(adapter, cmd); +// break; +// case ETHTOOL_GRXCLSRLALL: +// ret = grtnic_get_ethtool_fdir_all(adapter, cmd, +// (u32 *)rule_locs); +// break; +// case ETHTOOL_GRXFH: +// ret = grtnic_get_rss_hash_opts(adapter, cmd); +// break; + default: + break; + } + + return ret; +} + +#endif /* ETHTOOL_GRXRINGS */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +static int 
grtnic_rss_indir_tbl_max(struct grtnic_adapter *adapter) +{ + return 16; +} + +static u32 grtnic_get_rxfh_key_size(struct net_device *netdev) +{ + return GRTNIC_RSS_KEY_SIZE; +} + +static u32 grtnic_rss_indir_size(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + return grtnic_rss_indir_tbl_entries(adapter); +} + +static void grtnic_get_reta(struct grtnic_adapter *adapter, u32 *indir) +{ + int i, reta_size = grtnic_rss_indir_tbl_entries(adapter); + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int grtnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int grtnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + if (indir) + grtnic_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, grtnic_get_rxfh_key_size(netdev)); + + return 0; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int grtnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int grtnic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int grtnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = grtnic_rss_indir_tbl_entries(adapter); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + return -EINVAL; +#endif + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, grtnic_rss_indir_tbl_max(adapter)); + + /* Verify user input. 
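/*
 * Illustrative sketch (a common default layout, not necessarily what this
 * driver programs at probe time): with grtnic_rss_indir_tbl_max() capping
 * usable queues at 16 and, say, 8 Rx queues, every table entry must be < 8,
 * and a simple spread fills the redirection table round-robin.
 */
static void grtnic_fill_reta_round_robin_sketch(struct grtnic_adapter *adapter)
{
	u32 i, entries = grtnic_rss_indir_tbl_entries(adapter);

	for (i = 0; i < entries; i++)
		adapter->rss_indir_tbl[i] = i % adapter->num_rx_queues;

	grtnic_store_reta(adapter);
}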
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + grtnic_store_reta(adapter); + } + + /* Fill out the rss hash key */ + if (key) { + memcpy(adapter->rss_key, key, grtnic_get_rxfh_key_size(netdev)); + grtnic_store_key(adapter); + } + + return 0; +} +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +////////////////////////////////////////////////////////////////////////////////////////////// +static irqreturn_t grtnic_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct grtnic_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr = GRTNIC_READ_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + + return IRQ_HANDLED; +} + +static int grtnic_intr_test(struct grtnic_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (GRTNIC_REMOVED(adapter->hw.dma_bar)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &grtnic_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &grtnic_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &grtnic_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), 0xFFFFFFFF, 1); + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 2; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), mask, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), mask, 1); //trigger interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), mask, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), mask, 1); //trigger interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), ~mask & 0x03, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), ~mask & 0x03, 1); //trigger interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), 0xFFFFFFFF, 1); + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + + +static void grtnic_free_desc_rings(struct grtnic_adapter *adapter) +{ + /* Shut down the DMA engines now so they can be reinitialized later, + * since the test rings and normally used rings should overlap on + * queue 0 we can just use the standard disable Rx/Tx calls and they + * will take care of disabling the test rings for us. + */ + + /* first Rx */ + grtnic_disable_rx_queue(adapter); + + /* now Tx */ + grtnic_disable_tx_queue(adapter); + + grtnic_reset(adapter); + + grtnic_free_tx_resources(&adapter->test_tx_ring); + grtnic_free_rx_resources(&adapter->test_rx_ring); +} + + + +static int grtnic_setup_desc_rings(struct grtnic_adapter *adapter) +{ + struct grtnic_ring *tx_ring = &adapter->test_tx_ring; + struct grtnic_ring *rx_ring = &adapter->test_rx_ring; + int ret_val; + int err; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = GRTNIC_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = grtnic_setup_tx_resources(tx_ring); + if (err) + return 1; + + grtnic_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = GRTNIC_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_2K; + + err = grtnic_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + grtnic_SetRx(adapter->netdev, 0); //stop rx + GRTNIC_WRITE_REG(&adapter->hw, ASIC_RX_FIFO_RST, 0xff, 0); //reset all channel rx fifo data + + grtnic_configure_rx_ring(adapter, rx_ring); + + grtnic_SetRx(adapter->netdev, 1); //start rx + + return 0; + +err_nomem: + grtnic_free_desc_rings(adapter); + return ret_val; +} + +static int grtnic_setup_loopback_test(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 reg_data; + u8 promisc_mode = 1; + + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x00, 0); //enable laser; only for led blink + + if(adapter->ei->type == board_1002E_GRT_FF || adapter->ei->type == board_1005E_GRT_FX) + { + //enable loopback + grtnic_SetPhyAddr(adapter->netdev, phy_addr, 0x01, 0x00); //prtad_devad_reg //mdio reg:1.0 + grtnic_PhyRead(adapter->netdev, phy_addr, 0x01, ®_data); + reg_data |= 0x01; //loopback 1.0.0 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x01, reg_data); + } + + else + { + /* Setup PHY loopback */ + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, ®_data); + + reg_data |= PHY_LOOPBACK; + reg_data &= ~PHY_ISOLATE; + reg_data &= ~PHY_AUTO_NEG_EN; + + if(adapter->ei->type == board_902T_GRT_FF) + reg_data &= ~PHY_POWER_DOWN; + + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, reg_data); + + if(adapter->ei->type == board_902T_GRT_FF) + { + /* 
Setup mac speed */ + grtnic_ResetRx(adapter->netdev); + grtnic_SetSpeed(adapter->netdev, 0x02); //speed 1000 + } + } + + /*muliticast mode*/ + reg_data = grtnic_GetAdrsFilter(adapter->netdev); + reg_data |= promisc_mode; //promisc + grtnic_SetAdrsFilter(adapter->netdev, reg_data); + + GRTNIC_WRITE_REG(hw, CSUM_ENABLE, 0, 0); ////tx rx checksum off + + usleep_range(10000, 20000); + + return 0; +} + +static void grtnic_loopback_cleanup(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 reg_data; + u8 promisc_mode = 1; + u8 csum_tx_mode = 0, csum_rx_mode = 0; + + if(adapter->flags & GRTNIC_FLAG_TXCSUM_CAPABLE) csum_tx_mode = 1; + if(adapter->flags & GRTNIC_FLAG_RXCSUM_CAPABLE) csum_rx_mode = 1; + GRTNIC_WRITE_REG(hw, CSUM_ENABLE, (csum_rx_mode << 1 | csum_tx_mode), 0); //告诉asic, tx checksum offload + + if(adapter->ei->type == board_1002E_GRT_FF || adapter->ei->type == board_1005E_GRT_FX) + { + //disable loopback + grtnic_SetPhyAddr(adapter->netdev, phy_addr, 0x01, 0x00); //prtad_devad_reg //mdio reg:1.0 + grtnic_PhyRead(adapter->netdev, phy_addr, 0x01, ®_data); + reg_data &= ~0x01; //clear loopback 1.0.0 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x01, reg_data); + } + + else + { + /* Clear PHY loopback */ + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, ®_data); + reg_data &= ~PHY_LOOPBACK; + reg_data |= PHY_AUTO_NEG_EN; + + if(adapter->ei->type == board_902T_GRT_FF) + reg_data |= PHY_POWER_DOWN; + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, reg_data); + } + + /*Clear muliticast mode*/ + reg_data = grtnic_GetAdrsFilter(adapter->netdev); + reg_data &= ~promisc_mode; //promisc + grtnic_SetAdrsFilter(adapter->netdev, reg_data); +} + +static void grtnic_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool grtnic_check_lbtest_frame(struct grtnic_rx_buffer *rx_buffer, unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + +#ifdef CONFIG_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else + data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + +#endif + return match; +} + +static u16 grtnic_clean_test_rings(struct grtnic_ring *rx_ring, struct grtnic_ring *tx_ring, unsigned int size) +{ + union grtnic_rx_desc *rx_desc; +#ifdef CONFIG_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buffer_len; +#else + const int bufsz = grtnic_rx_bufsz(rx_ring); +#endif + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ntc); + + while (tx_ntc != tx_ring->next_to_use) { + union grtnic_tx_desc *tx_desc; + struct grtnic_tx_buffer *tx_buffer; + + tx_desc = GRTNIC_TX_DESC(*tx_ring, tx_ntc); + + /* if DD is not set transmit has not completed */ + if (!tx_desc->wb.len_ctl.cmp) + return count; + + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + 
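/*
 * Worked example of the loopback frame layout above, for the size = 1024
 * used by grtnic_run_loopback_test(): the whole 1024-byte buffer is first
 * filled with 0xFF, then with frame_size halved to 512 bytes 512..766 are
 * overwritten with 0xAA and the two markers 0xBE / 0xAF are planted at
 * offsets 522 and 524.  The checker only has to confirm that byte 3 is still
 * 0xFF and that both markers survived the round trip to call the frame good.
 */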
dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + /* increment Tx next to clean counter */ + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + } + + while (rx_desc->wb.upper.len_ctl.cmp) { + struct grtnic_rx_buffer *rx_buffer; + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (grtnic_check_lbtest_frame(rx_buffer, size)) + count++; + else + break; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx next to clean counter */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + grtnic_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +#define DESC_PER_LOOP 64 + +static int grtnic_run_loopback_test(struct grtnic_adapter *adapter) +{ + struct grtnic_ring *tx_ring = &adapter->test_tx_ring; + struct grtnic_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + grtnic_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / DESC_PER_LOOP) * 2) + 1; + else + lc = ((rx_ring->count / DESC_PER_LOOP) * 2) + 1; + + for (j = 0; j <= lc; j++) { + unsigned int good_cnt; + + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < DESC_PER_LOOP; i++) { + skb_get(skb); + tx_ret_val = grtnic_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != DESC_PER_LOOP) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = grtnic_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != DESC_PER_LOOP) { + ret_val = 13; + break; + } + + } + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + + +static int grtnic_loopback_test(struct grtnic_adapter *adapter, u64 *data) +{ + *data = grtnic_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = grtnic_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = grtnic_run_loopback_test(adapter); + grtnic_loopback_cleanup(adapter); + +err_loopback: + grtnic_free_desc_rings(adapter); +out: + return *data; +} + +static bool grtnic_eeprom_test(struct grtnic_adapter *adapter, u64 *data) +{ + int firmware_offset = adapter->speed; //10G,link_speed = 1 + int offset_int = 0xF00000 - (firmware_offset * 0x100000); + u32 ident_id; + + read_flash_buffer(adapter, offset_int, 1, &ident_id); + + if (ident_id != 0x665599AA) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + + +/* ethtool register test data */ +struct grtnic_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + 
u32 mask; + u32 write; +}; + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +static struct grtnic_reg_test reg_test[] = { + { MAX_LED_PKT_NUM, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +// { MAC_ADRS_LOW, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { MAC_ADRS_HIGH, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { FC_WATERMARK, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { MAC_ADRS_FILTER, 1, SET_READ_TEST, 0x00000007, 0x00000007 }, + { CSUM_ENABLE, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { .reg = 0 } +}; + +static bool reg_set_and_check(struct grtnic_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + before = GRTNIC_READ_REG(&adapter->hw, reg, 0); + GRTNIC_WRITE_REG(&adapter->hw, reg, write & mask, 0); + usleep_range(10, 20); //wait for data stable + val = GRTNIC_READ_REG(&adapter->hw, reg, 0); + + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return true; + } + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return false; +} + + +static bool reg_pattern_test(struct grtnic_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = GRTNIC_READ_REG(&adapter->hw, reg, 0); + GRTNIC_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write, 0); + usleep_range(10, 20); //wait for data stable + val = GRTNIC_READ_REG(&adapter->hw, reg, 0); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return true; + } + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + } + return false; +} + +static bool grtnic_reg_test(struct grtnic_adapter *adapter, u64 *data) +{ + struct grtnic_reg_test *test; + struct grtnic_hw *hw = &adapter->hw; + u32 i; + + if (GRTNIC_REMOVED(hw->user_bar)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
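/*
 * Worked example of PATTERN_TEST above: for the MAC_ADRS_HIGH entry
 * (mask = write = 0x0000FFFF), the pattern 0xA5A5A5A5 is written as
 * (0xA5A5A5A5 & 0x0000FFFF) = 0x0000A5A5, and the read-back value must equal
 * 0x0000A5A5 (the pattern masked by both write and mask); any other result
 * records the offending register in *data and fails the self-test.
 * SET_READ_TEST is the single-value variant of the same write/read-back
 * check.
 */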
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + GRTNIC_WRITE_REG(hw, test->reg + (i * 0x40), + test->write, 0); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + + +static void grtnic_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct grtnic_hw *hw = &adapter->hw; + + if (GRTNIC_REMOVED(hw->user_bar)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__GRTNIC_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if(GRTNIC_READ_REG(hw, XPHY_STATUS, 0) & 0x01) //link up + data[4] = 0; + else { + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + if (if_running) + /* indicate we're in test mode */ + grtnic_close(netdev); + else + grtnic_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (grtnic_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (grtnic_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (grtnic_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + + e_info(hw, "loopback testing starting\n"); + if (grtnic_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__GRTNIC_TESTING, &adapter->state); + + if (if_running) + grtnic_open(netdev); + else + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 0); //disable laser; + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if(GRTNIC_READ_REG(hw, XPHY_STATUS, 0) & 0x01) //link up + data[4] = 0; + else { + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__GRTNIC_TESTING, &adapter->state); + } + + msleep_interruptible(4 * 1000); +} + +#ifdef ETHTOOL_GMODULEINFO +static int grtnic_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + struct grtnic_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + bool page_swap = false; + + if(adapter->type!=0) //not fiber + return 0; + + /* Check whether we 
support SFF-8472 or not */ + status = grtnic_read_i2c_eeprom(hw, GRTNIC_SFF_SFF_8472_COMP, &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = grtnic_read_i2c_eeprom(hw, GRTNIC_SFF_SFF_8472_SWAP, &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & GRTNIC_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == GRTNIC_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int grtnic_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + struct grtnic_hw *hw = &adapter->hw; + u32 status = GRTNIC_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__GRTNIC_IN_SFP_INIT, &adapter->state)) + return -EBUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = grtnic_read_i2c_eeprom(hw, i, &databyte); + else + status = grtnic_read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - ee->offset] = databyte; + } + + return 0; +} +#endif /* ETHTOOL_GMODULEINFO */ + +#ifndef HAVE_NDO_SET_FEATURES +static u32 grtnic_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int grtnic_set_rx_csum(struct net_device *netdev, u32 data) +{ + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + return 0; +} + +static int grtnic_set_tx_csum(struct net_device *netdev, u32 data) +{ + + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} +#endif /* HAVE_NDO_SET_FEATURES */ + +static int grtnic_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int grtnic_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset 
= false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = GRTNIC_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = GRTNIC_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < GRTNIC_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= GRTNIC_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < GRTNIC_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ +// need_reset |= grtnic_update_rsc(adapter); + +// if (adapter->hw.mac.dmac_config.watchdog_timer && +// (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { +// e_info(probe, +// "Disabling DMA coalescing because interrupt throttling is disabled\n"); +// adapter->hw.mac.dmac_config.watchdog_timer = 0; +// ixgbe_dmac_config(&adapter->hw); +// } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct grtnic_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + grtnic_write_itr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + grtnic_do_reset(netdev); + + return 0; +} + +static u32 grtnic_get_msglevel(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void grtnic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +static struct ethtool_ops grtnic_nic_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = grtnic_get_link_ksettings, + .set_link_ksettings = grtnic_set_link_ksettings, +#else + .get_settings = grtnic_nic_get_settings, + .set_settings = grtnic_nic_set_settings, +#endif + .get_drvinfo = grtnic_nic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = grtnic_nic_get_ringparam, + .set_ringparam = grtnic_nic_set_ringparam, + .get_pauseparam = grtnic_get_pauseparam, + .set_pauseparam = grtnic_set_pauseparam, + .get_msglevel = 
grtnic_get_msglevel, + .set_msglevel = grtnic_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = grtnic_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = grtnic_diag_test, + .get_strings = grtnic_get_strings, + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = grtnic_nic_set_phys_id, +#else + .phys_id = grtnic_nic_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = grtnic_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = grtnic_get_sset_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = grtnic_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = grtnic_get_coalesce, + .set_coalesce = grtnic_set_coalesce, +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = grtnic_get_rx_csum, + .set_rx_csum = grtnic_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = grtnic_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = grtnic_get_rxnfc, +// .set_rxnfc = ixgbe_set_rxnfc, +//#ifdef ETHTOOL_SRXNTUPLE +// .set_rx_ntuple = ixgbe_set_rx_ntuple, +//#endif +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = grtnic_get_module_info, + .get_module_eeprom = grtnic_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = grtnic_rss_indir_size, + .get_rxfh_key_size = grtnic_get_rxfh_key_size, + .get_rxfh = grtnic_get_rxfh, + .set_rxfh = grtnic_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + .flash_device = grtnic_flash_device, +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext grtnic_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .set_phys_id = grtnic_nic_set_phys_id, + +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = grtnic_get_module_info, + .get_module_eeprom = grtnic_get_module_eeprom, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = grtnic_rss_indir_size, + .get_rxfh_key_size = grtnic_get_rxfh_key_size, + .get_rxfh = grtnic_get_rxfh, + .set_rxfh = grtnic_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +}; + +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + +void grtnic_set_ethtool_ops(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &grtnic_nic_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &grtnic_nic_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &grtnic_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_macphy.c b/drivers/net/ethernet/guangruntong/grtnic_macphy.c new file mode 100755 index 00000000000000..fd733d369a8aac --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_macphy.c @@ -0,0 +1,1009 @@ +#include "grtnic.h" +#include "grtnic_macphy.h" + +void grtnic_SetSpeed(struct net_device *netdev, int speed) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = 
XXGE_PORT_ADDRBASE; + u32 Speed_reg = speed << 30; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_SPEED_OFFSET), Speed_reg, 0); +} + + +void grtnic_SetFc(struct net_device *netdev, int onoff) //flow control not used here; use grtnic_SetPause below +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Reg; + + Reg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), 0); + if(onoff) + { + Reg |= XXGE_FCC_FCRX_MASK; + } + else + { + Reg &= ~XXGE_FCC_FCRX_MASK; + } + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), Reg, 0); +} + +int grtnic_ResetTx(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegTc; + u32 TimeoutLoops; + + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + RegTc |= XXGE_TC_RST_MASK; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); + TimeoutLoops = XXGE_RST_DELAY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && (RegTc & XXGE_TC_RST_MASK)) { + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + TimeoutLoops --; + } //return something meaningful here + + if(0 == TimeoutLoops ) { + return 1; + } + return 0; +} + +int grtnic_ResetRx(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegRcw1; + u32 TimeoutLoops; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + RegRcw1 |= XXGE_RCW1_RST_MASK; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + + TimeoutLoops = XXGE_RST_DELAY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && (RegRcw1 & XXGE_RCW1_RST_MASK)) { + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + TimeoutLoops --; + } //return something meaningful here + + if(0 == TimeoutLoops ) { + return 1; + } + return 0; +} + + +void grtnic_SetTx(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegTc; + + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + if(onoff) + { + RegTc |= XXGE_TC_TX_MASK; + } + else + { + RegTc &= ~XXGE_TC_TX_MASK; + } + + RegTc |= XXGE_TC_DIC_MASK; //Deficit Idle Count Enable + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); +} + +void grtnic_SetRx(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegRcw1; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + + if(onoff) + { + RegRcw1 |= XXGE_RCW1_RX_MASK; + } + else + { + RegRcw1 &= ~XXGE_RCW1_RX_MASK; + } + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); +// printk("after RegRcw1 = %08x\n", RegRcw1); +} + +void grtnic_GetRx(struct net_device *netdev, u32 *status) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + *status = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); +// printk("read RegRcw1 = %08x\n", *status); +} + +void grtnic_SetMaxFrameLen(struct net_device *netdev, int len) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct 
grtnic_hw *hw = &adapter->hw; + + u32 value; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + value = ((len & 0x7FFF) | (1<<16)); + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TMTU_OFFSET), value, 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RMTU_OFFSET), value, 0); +} + +void grtnic_SetJumbo(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 RegRcw1; + u32 RegTc; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + + if(onoff) + { + RegRcw1 |= XXGE_RCW1_JUM_MASK; + RegTc |= XXGE_TC_JUM_MASK; + } + else + { + RegRcw1 &= ~XXGE_RCW1_JUM_MASK; + RegTc &= ~XXGE_TC_JUM_MASK; + } + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); +} + +void grtnic_SetAdrsFilter(struct net_device *netdev, int filter) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + GRTNIC_WRITE_REG(hw, MAC_ADRS_FILTER, filter, 0); +} + +int grtnic_GetAdrsFilter(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + int filter; + + filter = GRTNIC_READ_REG(hw, MAC_ADRS_FILTER, 0); + return filter; +} + +void grtnic_SetMacAddress(struct net_device *netdev, const u8 *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + MacAddr = Aptr[0]; + MacAddr |= Aptr[1] << 8; + MacAddr |= Aptr[2] << 16; + MacAddr |= Aptr[3] << 24; + GRTNIC_WRITE_REG(hw, MAC_ADRS_LOW, MacAddr, 0); //addr l + + MacAddr = 0; + MacAddr |= Aptr[4]; + MacAddr |= Aptr[5] << 8; + GRTNIC_WRITE_REG(hw, MAC_ADRS_HIGH, MacAddr, 0); //addr h +} + + +void grtnic_GetMacAddress(struct net_device *netdev, void *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + MacAddr = GRTNIC_READ_REG(hw, MAC_ADRS_LOW, 0); + Aptr[0] = (u8) MacAddr; + Aptr[1] = (u8) (MacAddr >> 8); + Aptr[2] = (u8) (MacAddr >> 16); + Aptr[3] = (u8) (MacAddr >> 24); + + MacAddr = GRTNIC_READ_REG(hw, MAC_ADRS_HIGH, 0); + Aptr[4] = (u8) MacAddr; + Aptr[5] = (u8) (MacAddr >> 8); +} + +void grtnic_PhySetMdioDivisor(struct net_device *netdev, u8 Divisor) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG0_OFFSET), ((u32) Divisor | XXGE_MDIO_CFG0_MDIOEN_MASK), 0); +} + +void grtnic_SetPhyAddr(struct net_device *netdev, u32 Prtad, u32 Devad, u32 RegisterNum) //only for 10G phy +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Address; + u32 MdioCtrlReg = 0; + u32 TimeoutLoops; + /* Sequence of steps is: + * - Set MDIO REG (TX Data) + * - TX Data opcode (CFG1) 0x00 and PRTAD, DEVAD be written (TX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till the MDIO interface is ready to accept a new transaction. 
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout 1\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_TX_DATA_OFFSET), RegisterNum, 0); + + /* Now initiate the set PHY register address operation */ + Address = ((Prtad << 24) | (Devad << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout 2\n"); +////////////////////////////////////////////////////////////////////////////////////////////// +} + + +void grtnic_PhyRead(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 *PhyDataPtr) //if 10G, RegisterNum is devad +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + int max_speed = adapter->speed; + u32 mdio_cfg1_op_read_mask; + u32 TimeoutLoops; + + + u32 Address; + u32 MdioCtrlReg = 0; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + /* Sequence of steps is: + * - Set Address opcode (CFG1) and actual address (TX Data) + * - RX Data opcode (CFG1) and actual data read (RX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till MDIO interface is ready to accept a new transaction. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + + mdio_cfg1_op_read_mask = (max_speed==1) ? XXGE_MDIO_CFG1_OP_READ_MASK_10G : XXGE_MDIO_CFG1_OP_READ_MASK; + + + /* Now initiate the set PHY register address operation */ + + Address = ((PhyAddress << 24) | (RegisterNum << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK | mdio_cfg1_op_read_mask; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed. 
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + *PhyDataPtr = (u16) GRTNIC_READ_REG(&adapter->hw, (BaseAddr + XXGE_MDIO_RX_DATA_OFFSET), 0); +} + + +void grtnic_PhyWrite(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 PhyData) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Address; + u32 TimeoutLoops; + + u32 MdioCtrlReg = 0; + /* Sequence of steps is: + * - Set Address opcode (CFG1) and actual address (TX Data) + * - TX Data opcode (CFG1) and actual data to be written (TX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till the MDIO interface is ready to accept a new transaction. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + /* Now initiate the set PHY register address operation */ + Address = ((PhyAddress << 24) | (RegisterNum << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK | XXGE_MDIO_CFG1_OP_WRITE_MASK; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_TX_DATA_OFFSET), (PhyData & XXGE_MDIO_TX_DATA_MASK), 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed. 
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return somthing + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + +} + +void grtnic_SetPause (struct net_device *netdev, u8 flowctl) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 RegFc; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + + RegFc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), 0); + + printk("RegFc = %08x, flowctl=%x\n",RegFc, flowctl); + RegFc &= ~(XXGE_FCC_FCRX_MASK | XXGE_FCC_FCTX_MASK); + RegFc |= flowctl<<29; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), RegFc, 0); +} + +void grtnic_set_fc_watermarks (struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 value = ((ETH_HIGH_MARK << 16) | ETH_LOW_MARK); + GRTNIC_WRITE_REG(hw, FC_WATERMARK, value, 0); +} + +void grtnic_SetMacPauseAddress(struct net_device *netdev, const u8 *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + /* Set the MAC bits [31:0] in RCW0 register */ + MacAddr = Aptr[0]; + MacAddr |= Aptr[1] << 8; + MacAddr |= Aptr[2] << 16; + MacAddr |= Aptr[3] << 24; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW0_OFFSET), MacAddr, 0); + + /* RCW1 contains other info that must be preserved */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + MacAddr &= ~XXGE_RCW1_PAUSEADDR_MASK; + /* Set MAC bits [47:32] */ + MacAddr |= Aptr[4]; + MacAddr |= Aptr[5] << 8; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), MacAddr, 0); +} + +void grtnic_GetMacPauseAddress(struct net_device *netdev, void *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + /* Read MAC bits [31:0] in ERXC0 */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW0_OFFSET), 0); + Aptr[0] = (u8) MacAddr; + Aptr[1] = (u8) (MacAddr >> 8); + Aptr[2] = (u8) (MacAddr >> 16); + Aptr[3] = (u8) (MacAddr >> 24); + + /* Read MAC bits [47:32] in RCW1 */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + Aptr[4] = (u8) MacAddr; + Aptr[5] = (u8) (MacAddr >> 8); +} + +u64 grtnic_get_statistics_cnt(struct grtnic_adapter *adapter, u32 reg, u32 old_cnt) +{ + struct grtnic_hw *hw = &adapter->hw; + u64 new_val; + u32 temp_val0 = 0; + u64 temp_val1 = 0; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + temp_val0 = GRTNIC_READ_REG(hw, (BaseAddr + reg), 0); //low + temp_val1 = GRTNIC_READ_REG(hw, (BaseAddr + reg + 4), 0); //hi + new_val = (temp_val1 << 32) | temp_val0; + + return new_val; +} + +///////////////////////////////////////////////////////////////////////////////// +/** + * grtnic_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on 
X550 hardware. + **/ +static void grtnic_lower_i2c_clk(struct grtnic_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("grtnic_lower_i2c_clk"); + + *i2cctl &= ~(GRTNIC_I2C_CLK_OUT); + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(GRTNIC_I2C_T_FALL); +} + +/** + * grtnic_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. + **/ +static void grtnic_raise_i2c_clk(struct grtnic_hw *hw, u32 *i2cctl) +{ + u32 i = 0; + u32 timeout = GRTNIC_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + DEBUGFUNC("grtnic_raise_i2c_clk"); + + for (i = 0; i < timeout; i++) { + *i2cctl |= GRTNIC_I2C_CLK_OUT; + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + usec_delay(GRTNIC_I2C_T_RISE); + + i2cctl_r = GRTNIC_READ_REG(hw, I2CCTL, 0); + if (i2cctl_r & GRTNIC_I2C_CLK_IN) + break; + } +} + +/** + * grtnic_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. + **/ +static bool grtnic_get_i2c_data(struct grtnic_hw *hw, u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("grtnic_get_i2c_data"); + + if (*i2cctl & GRTNIC_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * grtnic_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. 
+ **/ +static s32 grtnic_set_i2c_data(struct grtnic_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = GRTNIC_SUCCESS; + + DEBUGFUNC("grtnic_set_i2c_data"); + + if (data) + *i2cctl |= GRTNIC_I2C_DATA_OUT; + else + *i2cctl &= ~(GRTNIC_I2C_DATA_OUT); + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(GRTNIC_I2C_T_RISE + GRTNIC_I2C_T_FALL + GRTNIC_I2C_T_SU_DATA); + + if (!data) /* Can't verify data in this case */ + return GRTNIC_SUCCESS; + + /* Verify data was set correctly */ + *i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + if (data != grtnic_get_i2c_data(hw, i2cctl)) { + status = GRTNIC_ERR_I2C; + printk("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * grtnic_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static void grtnic_clock_in_i2c_bit(struct grtnic_hw *hw, bool *data) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_clock_in_i2c_bit"); + + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + *data = grtnic_get_i2c_data(hw, &i2cctl); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); +} + +/** + * grtnic_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 grtnic_clock_out_i2c_bit(struct grtnic_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_clock_out_i2c_bit"); + + status = grtnic_set_i2c_data(hw, &i2cctl, data); + if (status == GRTNIC_SUCCESS) { + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + usec_delay(GRTNIC_I2C_T_LOW); + } else { + status = GRTNIC_ERR_I2C; + printk("I2C data was not set to %X\n", data); + } + + return status; +} + +/** + * grtnic_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. + **/ +static void grtnic_i2c_start(struct grtnic_hw *hw) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_i2c_start"); + + /* Start condition must begin with data and clock high */ + grtnic_set_i2c_data(hw, &i2cctl, 1); + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(GRTNIC_I2C_T_SU_STA); + + grtnic_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(GRTNIC_I2C_T_HD_STA); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); + +} + +/** + * grtnic_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
+ **/ +static void grtnic_i2c_stop(struct grtnic_hw *hw) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + grtnic_set_i2c_data(hw, &i2cctl, 0); + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(GRTNIC_I2C_T_SU_STO); + + grtnic_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(GRTNIC_I2C_T_BUF); +} + +/** + * grtnic_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static void grtnic_clock_in_i2c_byte(struct grtnic_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("grtnic_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + grtnic_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } +} + +/** + * grtnic_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 grtnic_clock_out_i2c_byte(struct grtnic_hw *hw, u8 data) +{ + s32 status = GRTNIC_SUCCESS; + s32 i; + u32 i2cctl; + bool bit; + + DEBUGFUNC("grtnic_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = grtnic_clock_out_i2c_bit(hw, bit); + + if (status != GRTNIC_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + i2cctl |= GRTNIC_I2C_DATA_OUT; + GRTNIC_WRITE_REG(hw, I2CCTL, i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + return status; +} + +/** + * grtnic_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 grtnic_get_i2c_ack(struct grtnic_hw *hw) +{ + s32 status = GRTNIC_SUCCESS; + u32 i = 0; + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("grtnic_get_i2c_ack"); + + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + ack = grtnic_get_i2c_data(hw, &i2cctl); + + usec_delay(1); + if (!ack) + break; + } + + if (ack) { + printk("I2C ack was not received.\n"); + status = GRTNIC_ERR_I2C; + } + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); + + return status; +} + +/** + * grtnic_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
+ **/ +void grtnic_i2c_bus_clear(struct grtnic_hw *hw) +{ + u32 i2cctl; + u32 i; + + DEBUGFUNC("grtnic_i2c_bus_clear"); + + grtnic_i2c_start(hw); + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + grtnic_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(GRTNIC_I2C_T_LOW); + } + + grtnic_i2c_start(hw); + + /* Put the i2c bus back to default state */ + grtnic_i2c_stop(hw); +} + +/** + * grtnic_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 grtnic_read_i2c_byte_generic_int(struct grtnic_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + bool nack = 1; + *data = 0; + + DEBUGFUNC("grtnic_read_i2c_byte_generic"); + + do { + grtnic_i2c_start(hw); + + /* Device Address and write indication */ + status = grtnic_clock_out_i2c_byte(hw, dev_addr); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_clock_out_i2c_byte(hw, byte_offset); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_i2c_start(hw); + + /* Device Address and read indication */ + status = grtnic_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_clock_in_i2c_byte(hw, data); + + status = grtnic_clock_out_i2c_bit(hw, nack); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_i2c_stop(hw); + + return GRTNIC_SUCCESS; + +fail: + grtnic_i2c_bus_clear(hw); + + if (retry < max_retry) + printk("I2C byte read error - Retrying.\n"); + else + printk("I2C byte read error.\n"); + retry++; + + } while (retry <= max_retry); + + return status; +} + +/** + * grtnic_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 grtnic_read_i2c_byte(struct grtnic_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) +{ + return grtnic_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, true); +} + +/** + * grtnic_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 grtnic_read_i2c_eeprom(struct grtnic_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + DEBUGFUNC("grtnic_read_i2c_eeprom_generic"); + + return grtnic_read_i2c_byte(hw, byte_offset, GRTNIC_I2C_EEPROM_DEV_ADDR, eeprom_data); +} + +/** + * grtnic_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 grtnic_read_i2c_sff8472(struct grtnic_hw *hw, u8 byte_offset, u8 *sff8472_data) +{ + return grtnic_read_i2c_byte(hw, byte_offset, GRTNIC_I2C_EEPROM_DEV_ADDR2, sff8472_data); +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_macphy.h b/drivers/net/ethernet/guangruntong/grtnic_macphy.h new file mode 100755 index 00000000000000..4526da67fe5152 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_macphy.h @@ -0,0 +1,256 @@ +#ifndef _GRTNICMACPHY_H_ +#define _GRTNICMACPHY_H_ + +struct sfp_info { + u8 wr_cmd; + u8 count; + u8 dev_addr; + u8 reg_addr; +}; + + +void grtnic_SetSpeed(struct net_device *netdev, int speed); +void grtnic_SetFc(struct net_device *netdev, int onoff); +int grtnic_ResetTx(struct net_device *netdev); +int grtnic_ResetRx(struct net_device *netdev); +void grtnic_SetTx(struct net_device *netdev, int onoff); +void grtnic_SetRx(struct net_device *netdev, int onoff); +void grtnic_GetRx(struct net_device *netdev, u32 *status); + +void grtnic_SetMaxFrameLen(struct net_device *netdev, int len); +void grtnic_SetJumbo(struct net_device *netdev, int onoff); + +void grtnic_SetAdrsFilter(struct net_device *netdev, int filter); +int grtnic_GetAdrsFilter(struct net_device *netdev); +u32 grtnic_GetSFP_Reg(struct net_device *netdev, struct sfp_info* sfc_info); + +void grtnic_SetMacAddress(struct net_device *netdev, const u8 *AddressPtr); +void grtnic_GetMacAddress(struct net_device *netdev, void *AddressPtr); +void grtnic_PhySetMdioDivisor(struct net_device *netdev, u8 Divisor); +int grtnic_Get_phy_status(struct net_device *netdev, int *linkup); + +void grtnic_SetPhyAddr(struct net_device *netdev, u32 Prtad, u32 Devad, u32 RegisterNum); //only for 10G phy +void grtnic_PhyRead(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 *PhyDataPtr); +void grtnic_PhyWrite(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 PhyData); + +void grtnic_SetPause (struct net_device *netdev, u8 flowctl); +void grtnic_set_fc_watermarks (struct net_device *netdev); +void grtnic_SetMacPauseAddress(struct net_device *netdev, const u8 *AddressPtr); +void grtnic_GetMacPauseAddress(struct net_device *netdev, void *AddressPtr); +u64 grtnic_get_statistics_cnt(struct grtnic_adapter *adapter, u32 reg, u32 old_cnt); + +s32 grtnic_read_i2c_eeprom(struct grtnic_hw *hw, u8 byte_offset, u8 *eeprom_data); +s32 grtnic_read_i2c_sff8472(struct grtnic_hw *hw, u8 byte_offset, u8 *sff8472_data); + + +#define XXGE_PORT_ADDRBASE 0x00008000 + +#define XXGE_RCW0_OFFSET 0x00000400 /**< Rx Configuration Word 0 */ +#define XXGE_RCW1_OFFSET 0x00000404 /**< Rx Configuration Word 1 */ +#define XXGE_TC_OFFSET 0x00000408 /**< Tx Configuration */ +#define XXGE_FCC_OFFSET 0x0000040C /**< Flow Control Configuration */ + +#define XXGE_SPEED_OFFSET 0x00000410 /**< MAC Speed Configuration */ + +#define XXGE_RMTU_OFFSET 0x00000414 /**< Receiver MTU Configuration Word ~chng.. */ +#define XXGE_TMTU_OFFSET 0x00000418 /**< Transmitter MTU Configuration Word ~chng.. 
*/ + +#define XXGE_MDIO_REGISTER_ADDRESS 32 /* Register to read for getting phy status */ +#define XXGE_MDIO_CFG0_OFFSET 0x00000500 /**< MDIO Configuration word 0 */ +#define XXGE_MDIO_CFG1_OFFSET 0x00000504 /**< MDIO Configuration word 1 */ +#define XXGE_MDIO_TX_DATA_OFFSET 0x00000508 /**< MDIO TX Data */ +#define XXGE_MDIO_RX_DATA_OFFSET 0x0000050C /**< MDIO RX Data (Read-only) */ + +#define XXGE_TC_TXCONTROLBIT 0x1C /* Set this bit to 0 to disable transmission */ + +/** @name Flow Control Configuration (FCC) Register Bit definitions + * @{ + */ +#define XXGE_FCC_FCRX_MASK 0x20000000 /**< Rx flow control enable */ +#define XXGE_FCC_FCTX_MASK 0x40000000 /**< Tx flow control enable */ + + +/** @name Receive Configuration Word 1 (RCW1) Register bit definitions + * @{ + */ +#define XXGE_RCW1_RST_MASK 0x80000000 /**< Reset */ +#define XXGE_RCW1_JUM_MASK 0x40000000 /**< Jumbo frame enable */ +#define XXGE_RCW1_FCS_MASK 0x20000000 /**< In-Band FCS enable + * (FCS not stripped) */ +#define XXGE_RCW1_RX_MASK 0x10000000 /**< Receiver enable */ +#define XXGE_RCW1_VLAN_MASK 0x08000000 /**< VLAN frame enable */ +#define XXGE_RCW1_HD_MASK 0x04000000 /**< Receiver Preserve Preamble Enable !!chng... change HD<->PP */ +#define XXGE_RCW1_LT_DIS_MASK 0x02000000 /**< Length/type field valid check + * disable + */ +#define XXGE_RCW1_CL_DIS_MASK 0x01000000 /**< Control frame Length check + * disable + */ +#define XXGE_RCW1_PAUSEADDR_MASK 0x0000FFFF /**< Pause frame source + * address bits [47:32].Bits + * [31:0] are stored in register + * RCW0 + */ +/** @name Transmitter Configuration (TC) Register bit definitions + * @{ + */ +#define XXGE_TC_RST_MASK 0x80000000 /**< Reset */ +#define XXGE_TC_JUM_MASK 0x40000000 /**< Jumbo frame enable */ +#define XXGE_TC_FCS_MASK 0x20000000 /**< In-Band FCS enable + * (FCS not generated) + */ +#define XXGE_TC_TX_MASK 0x10000000 /**< Transmitter enable */ +#define XXGE_TC_VLAN_MASK 0x08000000 /**< VLAN frame enable */ +#define XXGE_TC_HD_MASK 0x04000000 /**< WAN Mode Enable !!chng...bit-26 we may NOT use*/ +#define XXGE_TC_IFG_MASK 0x02000000 /**< Inter-frame gap adjustment enable */ +#define XXGE_TC_DIC_MASK 0x01000000 /**< Deficit Idle Count Enable */ + + +/** @name MDIO Management Configuration (MC) Register bit definitions + * @{ + */ +#define XXGE_MDIO_CFG0_MDIOEN_MASK 0x00000040 /**< MII management enable*/ +#define XXGE_MDIO_CFG0_CLOCK_DIVIDE_MAX 0x3F /**< Maximum MDIO divisor */ +#define XXGE_MDIO_PHY_LINK_UP_MASK 0x1000 /* Checking for 12th bit */ + + +#define XXGE_MDIO_MC_MDIOPRTAD_MASK 0x1F000000 /**< PRTAD ...b28:24*/ +#define XXGE_MDIO_MC_CLOCK_DEVAD_MAX 0x001F0000 /**< DEVAD ...b20:16*/ +#define XXGE_MDIO_MC_MDIO_TXOP_MASK 0x0000C000 /**< TX OP ...b15:14*/ +#define XXGE_MDIO_CFG1_INITIATE_MASK 0x00000800 /**< Initiate ...b11 */ +#define XXGE_MDIO_CFG1_READY_MASK 0x00000080 /**< MDIO Ready ...b7*/ +#define XXGE_MDIO_CFG1_OP_SETADDR_MASK 0x00000000 /**< Opcode Set Addr Mask */ +#define XXGE_MDIO_CFG1_OP_READ_MASK 0x00008000 /**< Opcode Read Mask */ +#define XXGE_MDIO_CFG1_OP_WRITE_MASK 0x00004000 /**< Opcode Write Mask */ + +#define XXGE_MDIO_CFG1_OP_READ_MASK_10G 0x0000C000 /**< Opcode Read Mask for 10G*/ + +/*@}*/ + + +/** @name MDIO TX Data (MTX) Register bit definitions + * @{ + */ +#define XXGE_MDIO_TX_DATA_MASK 0x0000FFFF /**< MDIO TX Data ...b15:0 */ + +/** @name MDIO TX Data (MTX) Register bit definitions + * @{ + */ +#define XXGE_MDIO_RX_DATA_MASK 0x0000FFFF /**< MDIO RX Data ...b15:0 */ + + +//user define +#define XXGE_RST_DELAY_LOOPCNT_VAL 4 /**< 
Timeout in ticks used + * while checking if the core + * had come out of reset. The + * exact tick time is defined + * in each case/loop where it + * will be used + */ +#define XXGE_MDIO_RDY_LOOPCNT_VAL 100 // Timeout in ticks used + + + +/* PHY Control Register */ +#define PHY_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define PHY_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define PHY_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define PHY_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define PHY_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define PHY_POWER_DOWN 0x0800 /* Power down */ +#define PHY_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define PHY_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define PHY_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define PHY_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define PHY_SPEED_1000 0x0040 +#define PHY_SPEED_100 0x2000 +#define PHY_SPEED_10 0x0000 + + +/* SFP I2C */ +#define GRTNIC_I2C_CLOCK_STRETCHING_TIMEOUT 500 +#define GRTNIC_ERR_PHY_ADDR_INVALID -17 +#define GRTNIC_ERR_I2C -18 + +#define GRTNIC_I2C_EEPROM_DEV_ADDR 0xA0 +#define GRTNIC_I2C_EEPROM_DEV_ADDR2 0xA2 +#define GRTNIC_I2C_EEPROM_BANK_LEN 0xFF + +/* EEPROM byte offsets */ +#define GRTNIC_SFF_IDENTIFIER 0x0 +#define GRTNIC_SFF_IDENTIFIER_SFP 0x3 +#define GRTNIC_SFF_VENDOR_OUI_BYTE0 0x25 +#define GRTNIC_SFF_VENDOR_OUI_BYTE1 0x26 +#define GRTNIC_SFF_VENDOR_OUI_BYTE2 0x27 +#define GRTNIC_SFF_1GBE_COMP_CODES 0x6 +#define GRTNIC_SFF_10GBE_COMP_CODES 0x3 +#define GRTNIC_SFF_CABLE_TECHNOLOGY 0x8 +#define GRTNIC_SFF_CABLE_SPEC_COMP 0x3C +#define GRTNIC_SFF_SFF_8472_SWAP 0x5C +#define GRTNIC_SFF_SFF_8472_COMP 0x5E +#define GRTNIC_SFF_SFF_8472_OSCB 0x6E +#define GRTNIC_SFF_SFF_8472_ESCB 0x76 +#define GRTNIC_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define GRTNIC_SFF_QSFP_CONNECTOR 0x82 +#define GRTNIC_SFF_QSFP_10GBE_COMP 0x83 +#define GRTNIC_SFF_QSFP_1GBE_COMP 0x86 +#define GRTNIC_SFF_QSFP_CABLE_LENGTH 0x92 +#define GRTNIC_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define GRTNIC_SFF_DA_PASSIVE_CABLE 0x4 +#define GRTNIC_SFF_DA_ACTIVE_CABLE 0x8 +#define GRTNIC_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define GRTNIC_SFF_1GBASESX_CAPABLE 0x1 +#define GRTNIC_SFF_1GBASELX_CAPABLE 0x2 +#define GRTNIC_SFF_1GBASET_CAPABLE 0x8 +#define GRTNIC_SFF_10GBASESR_CAPABLE 0x10 +#define GRTNIC_SFF_10GBASELR_CAPABLE 0x20 +#define GRTNIC_SFF_SOFT_RS_SELECT_MASK 0x8 +#define GRTNIC_SFF_SOFT_RS_SELECT_10G 0x8 +#define GRTNIC_SFF_SOFT_RS_SELECT_1G 0x0 +#define GRTNIC_SFF_ADDRESSING_MODE 0x4 +#define GRTNIC_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define GRTNIC_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define GRTNIC_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define GRTNIC_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define GRTNIC_I2C_EEPROM_READ_MASK 0x100 +#define GRTNIC_I2C_EEPROM_STATUS_MASK 0x3 +#define GRTNIC_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define GRTNIC_I2C_EEPROM_STATUS_PASS 0x1 +#define GRTNIC_I2C_EEPROM_STATUS_FAIL 0x2 +#define GRTNIC_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define GRTNIC_TN_LASI_STATUS_REG 0x9005 +#define GRTNIC_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance */ +#define GRTNIC_SFF_SFF_8472_UNSUP 0x00 + + +/* I2C SDA and SCL timing parameters for standard mode */ +#define GRTNIC_I2C_T_HD_STA 4 +#define GRTNIC_I2C_T_LOW 5 +#define GRTNIC_I2C_T_HIGH 4 +#define GRTNIC_I2C_T_SU_STA 5 +#define GRTNIC_I2C_T_HD_DATA 
5 +#define GRTNIC_I2C_T_SU_DATA 1 +#define GRTNIC_I2C_T_RISE 1 +#define GRTNIC_I2C_T_FALL 1 +#define GRTNIC_I2C_T_SU_STO 4 +#define GRTNIC_I2C_T_BUF 5 + +#define GRTNIC_I2C_CLK_IN 0x00000001 +#define GRTNIC_I2C_CLK_OUT 0x00000002 +#define GRTNIC_I2C_DATA_IN 0x00000004 +#define GRTNIC_I2C_DATA_OUT 0x00000008 + +#define msec_delay(_x) msleep(_x) +#define usec_delay(_x) udelay(_x) + +#define DEBUGFUNC(S) do {} while (0) + +#endif /* _XDMANET_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_main.c b/drivers/net/ethernet/guangruntong/grtnic_main.c new file mode 100755 index 00000000000000..39e98373b13a9c --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_main.c @@ -0,0 +1,2116 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#endif +#include +#include + +#include "grtnic.h" +#include "grtnic_macphy.h" + +#define DEFAULT_ETHER_ADDRESS "\02SUME\00" + +MODULE_AUTHOR("Beijing GRT Corporation, "); +MODULE_DESCRIPTION("GRTNIC Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(5,11,0) +MODULE_SUPPORTED_DEVICE(DRIVER_NAME); +#endif + +static const struct grt_gigeth_info *grt_gigeth_info_tbl[] = { + [board_902E_GRT_FF] = &grt_902eff_info, + [board_902T_GRT_FF] = &grt_902tff_info, + [board_901ELR_GRT_FF] = &grt_901elr_info, + [board_1001E_GRT_FF] = &grt_1001eff_info, + [board_1001E_QM_FF] = &qm_1001eff_info, + [board_1002E_GRT_FF] = &grt_1002eff_info, + [board_1005E_GRT_FX] = &grt_1005efx_info +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +static const struct pci_device_id grtnic_pci_tbl[] = { + {0x1E18, 0x0F82, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_902E_GRT_FF}, + {0x1E18, 0x0F02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_902T_GRT_FF}, + {0x1E18, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_901ELR_GRT_FF}, + {0x1E18, 0x1F81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1001E_GRT_FF}, + {0x1E18, 0x1F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1001E_QM_FF}, + {0x1E18, 0x1F82, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1002E_GRT_FF}, + {0x1E18, 0x1F25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1005E_GRT_FX}, + /* required last entry */ + {0 /* end */} +}; + +MODULE_DEVICE_TABLE(pci, grtnic_pci_tbl); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *grtnic_wq; + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) +static int grtnic_notify_dca(struct notifier_block *, unsigned long event, void *p); +static struct notifier_block dca_notifier = { + .notifier_call = grtnic_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif /* CONFIG_DCA */ + +#if 0 +// These are not defined in the 2.x.y kernels, so just define them +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,39) +#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x100 +#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x200 +#else +/** + * These are badly named in pre-3.6.11 kernel versions. 
We COULD do the same + * check as above, however (annoyingly) linux for tegra (based on post-3.6.11) + * picked up the header file from some pre-3.6.11 version, so we'll just make + * our code ugly and handle the check here: + */ +#ifndef PCI_EXP_DEVCTL2_IDO_REQ_EN +#define PCI_EXP_DEVCTL2_IDO_REQ_EN PCI_EXP_IDO_REQ_EN +#endif +#ifndef PCI_EXP_DEVCTL2_IDO_CMP_EN +#define PCI_EXP_DEVCTL2_IDO_CMP_EN PCI_EXP_IDO_CMP_EN +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + +} + +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) +{ + if (pos & 3) + return -EINVAL; + + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); +} + +#endif + +#endif + +static int grtnic_map_bars(struct grtnic_adapter *adapter, struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct grtnic_hw *hw = &adapter->hw; + + resource_size_t bar_start; + resource_size_t bar_end; + resource_size_t bar_len; + + bar_start = pci_resource_start(pdev, 0); + bar_end = pci_resource_end(pdev, 0); + bar_len = bar_end - bar_start + 1; + + hw->user_bar_len = bar_len; + hw->user_bar = pci_ioremap_bar(pdev, 0); + + if (!hw->user_bar) + { + dev_err(dev, "Could not map USER BAR"); + return -1; + } + dev_info(dev, "USER BAR mapped at 0x%p with length %llu", hw->user_bar, bar_len); + + bar_start = pci_resource_start(pdev, 1); + bar_end = pci_resource_end(pdev, 1); + bar_len = bar_end - bar_start + 1; + + hw->dma_bar_len = bar_len; + hw->dma_bar = pci_ioremap_bar(pdev, 1); + + if (!hw->dma_bar) + { + dev_err(dev, "Could not map DMA BAR"); + return -1; + } + + dev_info(dev, "DMA BAR mapped at 0x%p with length %llu", hw->dma_bar, bar_len); + + return 0; +} + +static void grtnic_free_bars(struct grtnic_adapter *adapter, struct pci_dev *pdev) +{ + struct grtnic_hw *hw = &adapter->hw; + + if(hw->user_bar) + pci_iounmap(pdev, hw->user_bar); + if(hw->dma_bar) + pci_iounmap(pdev, hw->dma_bar); +} + + +void grtnic_napi_enable_all(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL + grtnic_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +void grtnic_napi_disable_all(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while(!grtnic_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif + } +} + +///////////////////////////////////////////////////////////////////////////////////// + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + +static void grtnic_update_tx_dca(struct grtnic_adapter *adapter, struct grtnic_ring *tx_ring, int cpu) +{ + u32 txctrl = 0; + u16 reg_idx = tx_ring->reg_idx; + + if (adapter->flags & GRTNIC_GRTNIC_FLAG_DCA_ENABLED) + txctrl = dca3_get_tag(tx_ring->dev, cpu); + + txctrl <<= GRTNIC_DCA_TXCTRL_CPUID_SHIFT; + + /* + * We can enable 
relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= GRTNIC_DCA_TXCTRL_DESC_RRO_EN | + GRTNIC_DCA_TXCTRL_DATA_RRO_EN | + GRTNIC_DCA_TXCTRL_DESC_DCA_EN; + + write_register(txctrl, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DCA_RXTXCTL*4); + +} + +static void grtnic_update_rx_dca(struct grtnic_adapter *adapter, struct grtnic_ring *rx_ring, int cpu) +{ + u32 rxctrl = 0; + u8 reg_idx = rx_ring->reg_idx; + + if (adapter->flags & GRTNIC_GRTNIC_FLAG_DCA_ENABLED) + rxctrl = dca3_get_tag(rx_ring->dev, cpu); + + rxctrl <<= GRTNIC_DCA_RXCTRL_CPUID_SHIFT; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= GRTNIC_DCA_RXCTRL_DESC_RRO_EN | + GRTNIC_DCA_RXCTRL_DATA_DCA_EN | + GRTNIC_DCA_RXCTRL_DESC_DCA_EN; + + write_register(rxctrl, adapter->dma_bar+ (TARGET_C2H<<12) + (reg_idx<<8) + ADDR_DCA_RXTXCTL*4); +} + +void grtnic_update_dca(struct grtnic_q_vector *q_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_ring *ring; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + grtnic_for_each_ring(ring, q_vector->tx) + grtnic_update_tx_dca(adapter, ring, cpu); + + grtnic_for_each_ring(ring, q_vector->rx) + grtnic_update_rx_dca(adapter, ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +void grtnic_setup_dca(struct grtnic_adapter *adapter) +{ + int v_idx; + + /* always use CB2 mode, difference is masked in the CB driver */ + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + write_register(GRTNIC_DCA_CTRL_DCA_MODE_CB2, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + else + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + adapter->q_vector[v_idx]->cpu = -1; + grtnic_update_dca(adapter->q_vector[v_idx]); + } +} + +static int __grtnic_notify_dca(struct device *dev, void *data) +{ + struct grtnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & GRTNIC_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= GRTNIC_FLAG_DCA_ENABLED; + write_register(GRTNIC_DCA_CTRL_DCA_MODE_CB2, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + break; + } + /* fall through - DCA is disabled */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~GRTNIC_FLAG_DCA_ENABLED; + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + } + break; + } + + return 0; +} +#endif /* CONFIG_DCA */ + + +/** + * grtnic_rss_indir_tbl_entries - Return RSS indirection table entries + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 grtnic_rss_indir_tbl_entries(struct grtnic_adapter *adapter) +{ + return 128; +} + +/** + * grtnic_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. 
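+ *
+ * The hardware takes the key as ten consecutive 32-bit registers starting
+ * at RSS_KEY_BEGIN with a 4-byte stride, which is exactly what the loop
+ * below programs from adapter->rss_key[].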
+ */ +void grtnic_store_key(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + GRTNIC_WRITE_REG(hw, (RSS_KEY_BEGIN + i*4), adapter->rss_key[i], 0); +} + +/** + * grtnic_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int grtnic_init_rss_key(struct grtnic_adapter *adapter) +{ + +// static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, +// 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, +// 0xA32DCB77, 0x0CF23080, 0x3BB7426A, +// 0xFA01ACBE }; + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(GRTNIC_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, GRTNIC_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + +/** + * grtnic_store_reta - Write the RETA table to HW + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +void grtnic_store_reta(struct grtnic_adapter *adapter) +{ + u32 i = 0, reta_entries = grtnic_rss_indir_tbl_entries(adapter); + struct grtnic_hw *hw = &adapter->hw; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Write redirection table to HW */ + while (i < reta_entries) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= indir_tbl[i + j]; + } + + GRTNIC_WRITE_REG(hw, (RSS_RETA_BEGIN + i), val, 0); + i += 4; + + } +} + +static void grtnic_setup_reta(struct grtnic_adapter *adapter) +{ + u32 i, j; + u32 reta_entries = grtnic_rss_indir_tbl_entries(adapter); +// u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + u16 rss_i = adapter->rss_queues; + + /* Fill out hash function seeds */ + grtnic_store_key(adapter); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + grtnic_store_reta(adapter); +} + +/** + * grtnic_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ + +void grtnic_setup_mrqc(struct grtnic_adapter *adapter) +{ +// u32 mrqc = 0, rss_field = 0; //暂时没用到,这个地方可以设置RSS的方式 + grtnic_setup_reta(adapter); +// mrqc |= rss_field; +// IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + + +//////////////////////////////////////////////////////////////////////////////////////////// +void grtnic_irq_disable(struct grtnic_adapter *adapter) +{ + u32 var; + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + var = adapter->eims_enable_mask; + else + var = ~0; + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), var, 1); + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int vector; + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); //other + + } else { + synchronize_irq(adapter->pdev->irq); + } + +} + +void grtnic_irq_enable(struct grtnic_adapter *adapter) +{ + u32 var; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + var = adapter->eims_enable_mask; + else + var = ~0; + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IAM*4), var, 1); //当发出中断后自动禁止所有中断 + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), var, 1); //enable all interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush +} + + +void 
grtnic_free_irq(struct grtnic_adapter *adapter) +{ + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int vector = 0, i; + + for (i = 0; i < adapter->num_q_vectors; i++) + { +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif + free_irq(adapter->msix_entries[vector++].vector, adapter->q_vector[i]); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); //other + + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +#define N0_QUEUE -1 +static void grtnic_assign_vector(struct grtnic_q_vector *q_vector, int msix_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + int rx_queue = N0_QUEUE; + int tx_queue = N0_QUEUE; + u8 ivar; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + if (rx_queue > N0_QUEUE) + { + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((rx_queue*2+1)<<16 | 0x01), 1); //1: c2s eop interrupt mode + ivar = adapter->ivar[rx_queue]; + /* clear any bits that are currently set */ + ivar &= 0x0F; + ivar |= (msix_vector <<4); + adapter->ivar[rx_queue] = ivar; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (rx_queue<<16 | ivar), 1); + } + + if (tx_queue > N0_QUEUE) + { + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((tx_queue*2)<<16 | 0x00), 1); //0:s2c normal interrupt 1: no desc wb & no interrupt + ivar = adapter->ivar[tx_queue]; + /* clear any bits that are currently set */ + ivar &= 0xF0; + ivar |= msix_vector; + adapter->ivar[tx_queue] = ivar; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (tx_queue<<16 | ivar), 1); + } + + q_vector->eims_value = BIT(msix_vector); + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + +} + + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * @adapter: board private structure + * + **/ +void grtnic_configure_msi_and_legacy(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector = adapter->q_vector[0]; + + grtnic_write_itr(q_vector); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((0*2+1)<<16 | 0x01), 1); //1: c2s eop interrupt mode + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((0*2)<<16 | 0x00), 1); //0:s2c normal interrupt 1: no desc wb & no interrupt + + adapter->eims_other = BIT(0); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR_MISC*4), 0, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (0<<16 | 0x11), 1); +} + +/** + * grtnic_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * grtnic_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
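+ *
+ * One MSI-X vector is bound to each queue q_vector via
+ * grtnic_assign_vector(), and the next free vector is reserved for the
+ * "other" (link/misc) interrupt.  Both contributions are accumulated in
+ * adapter->eims_enable_mask, which grtnic_irq_enable() and
+ * grtnic_irq_disable() use when masking or unmasking the device.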
+ **/ +void grtnic_configure_msix(struct grtnic_adapter *adapter) +{ + int i, vector = 0; + + adapter->eims_enable_mask = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + { + grtnic_assign_vector(adapter->q_vector[i], vector++); + grtnic_write_itr(adapter->q_vector[i]); + } + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR_MISC*4), vector, 1); + + adapter->eims_enable_mask |= adapter->eims_other; + + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush +} + +/** + * grtnic_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure + * + * grtnic_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int grtnic_request_msix(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + unsigned int ri = 0, ti = 0; + int vector, err; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct grtnic_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-TxRx-%u", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-rx-%u", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-tx-%u", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(entry->vector, &grtnic_msix_ring, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt '%s' " + "Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* If Flow Director is enabled, set interrupt affinity */ +// if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(adapter->msix_entries[vector].vector, &q_vector->affinity_mask); +// } +#endif /* HAVE_IRQ_AFFINITY_HINT */ + } + + err = request_irq(adapter->msix_entries[vector].vector, &grtnic_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return GRTNIC_SUCCESS; + +free_queue_irqs: + while (vector) { + vector--; +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + + +int grtnic_request_irq(struct grtnic_adapter *adapter) +{ + int irq_flag; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + err = grtnic_request_msix(adapter); + + else + { + irq_flag = (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) ? 
0 : IRQF_SHARED; + err = request_irq(pdev->irq, grtnic_isr, irq_flag, DRIVER_NAME, adapter); + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +void grtnic_reset_interrupt_capability(struct grtnic_adapter *adapter) +{ + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + + +/** + * grtnic_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool grtnic_set_rss_queues(struct grtnic_adapter *adapter) +{ + u16 rss_i = adapter->rss_queues; + + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif + + return true; +} + + +/* + * grtnic_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void grtnic_set_num_queues(struct grtnic_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + grtnic_set_rss_queues(adapter); +} + +/** + * grtnic_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int grtnic_acquire_msix_vectors(struct grtnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, vectors; + + if (!(adapter->flags & GRTNIC_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair with XDP queues + * being stacked with TX queues. + */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + /* if tx handler is separate make it 1 for every queue */ +// if (!(adapter->flags & FLAG_QUEUE_PAIRS)) +// vectors = adapter->num_tx_queues + adapter->num_rx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = vectors; + + /* add 1 vector for link status interrupts */ + vectors++; + + adapter->msix_entries = kcalloc(vectors, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(pdev, adapter->msix_entries, vectors, vectors); + + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. 
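+ * Because pci_enable_msix_range() was called with the minimum equal to
+ * the maximum, the allocation is all-or-nothing: either every requested
+ * vector (one per queue pair plus one for link/misc) was granted, or we
+ * already bailed out above.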
+ */ + adapter->flags |= GRTNIC_FLAG_MSIX_ENABLED; + return 0; +} + + +void grtnic_set_interrupt_capability(struct grtnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err, i; + + for(i=0; iivar[i] = 0; + + /* We will try to get MSI-X interrupts first */ + if (!grtnic_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->rss_queues = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + grtnic_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & GRTNIC_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", + err); + else + adapter->flags |= GRTNIC_FLAG_MSI_ENABLED; +} + +/** + * grtnic_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void grtnic_free_q_vector(struct grtnic_adapter *adapter, int v_idx) +{ + struct grtnic_q_vector *q_vector = adapter->q_vector[v_idx]; + /* if we're coming from grtnic_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +static void grtnic_add_ring(struct grtnic_ring *ring, struct grtnic_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * grtnic_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
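+ *
+ * Each q_vector drives at most one Tx and one Rx ring (larger txr_count
+ * or rxr_count is rejected), and the ring structures are carved out of
+ * the same node-local allocation as the q_vector itself.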
+ **/ +static int grtnic_alloc_q_vector(struct grtnic_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct grtnic_q_vector *q_vector; + struct grtnic_ring *ring; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; + u8 tcs = 0; +// u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + int ring_count,size; + + /* only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct grtnic_q_vector) + + (sizeof(struct grtnic_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if (tcs <= 1) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + +#endif + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif + q_vector->node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ +// netif_napi_add(adapter->netdev, &q_vector->napi, grtnic_poll, 64); + netif_napi_add(adapter->netdev, &q_vector->napi, grtnic_poll); + +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif + +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_DISABLE); + +#endif + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = ITR_ADAPTIVE_MAX_USECS | ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = ITR_ADAPTIVE_MAX_USECS | ITR_ADAPTIVE_LATENCY; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = GRTNIC_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = GRTNIC_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = adapter->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + grtnic_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = adapter->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + grtnic_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * grtnic_free_q_vectors - Free memory allocated for interrupt vectors + * 
@adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void grtnic_free_q_vectors(struct grtnic_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + grtnic_free_q_vector(adapter, v_idx); +} + +/** + * grtnic_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int grtnic_alloc_q_vectors(struct grtnic_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int i; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = grtnic_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = grtnic_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->rx_ring[i]) + adapter->rx_ring[i]->reg_idx = i; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]) + adapter->tx_ring[i]->reg_idx = i; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + grtnic_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * grtnic_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +void grtnic_clear_interrupt_scheme(struct grtnic_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + grtnic_free_q_vectors(adapter); + grtnic_reset_interrupt_capability(adapter); +} + +/** + * grtnic_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) 
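+ *
+ * The call order below matters: grtnic_set_num_queues() sizes the queue
+ * counts, grtnic_set_interrupt_capability() then reserves MSI-X/MSI/legacy
+ * vectors for those counts (shrinking them to a single queue if MSI-X is
+ * unavailable), and grtnic_alloc_q_vectors() finally allocates the
+ * q_vectors and their rings.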
+ **/ +int grtnic_init_interrupt_scheme(struct grtnic_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + grtnic_set_num_queues(adapter); + + /* Set interrupt mode */ + grtnic_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = grtnic_alloc_q_vectors(adapter); + + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + grtnic_reset_interrupt_capability(adapter); + return err; + } + +// ixgbe_cache_ring_register(adapter); + + set_bit(__GRTNIC_DOWN, &adapter->state); + + return GRTNIC_SUCCESS; +} + + +static int grtnic_sw_init(struct grtnic_adapter *adapter) +{ + int i, err = 0; + struct grtnic_ring *tx_ring, *rx_ring; + int card_type = adapter->ei->type; + + if (grtnic_init_rss_key(adapter)) { + err = GRTNIC_ERR_OUT_OF_MEM; + e_err(probe, "rss_key allocation failed: %d\n", err); + goto out; + } + +//针对每个卡的phy设置,可以放在这里 + #if IS_ENABLED(CONFIG_DCA) + adapter->flags |= GRTNIC_FLAG_DCA_CAPABLE; + #endif + adapter->flags |= (GRTNIC_FLAG_MSI_CAPABLE | \ + GRTNIC_FLAG_MSIX_CAPABLE | \ + GRTNIC_FLAG_MQ_CAPABLE); + +// /* default flow control settings */ +// hw->fc.requested_mode = ixgbe_fc_full; +// hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ +// +// adapter->last_lfc_mode = hw->fc.current_mode; +// ixgbe_pbthresh_setup(adapter); +// hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; +// hw->fc.send_xon = true; +// hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = GRTNIC_DEFAULT_TXD; + adapter->rx_ring_count = GRTNIC_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = GRTNIC_DEFAULT_TX_WORK; + + adapter->max_frame_size = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; //1500+18+4 + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; //60+4 + + grtnic_PhySetMdioDivisor(adapter->netdev, 24); + + if(card_type == board_902T_GRT_FF) //FF902T + { + u16 temp; + u32 phy_addr = adapter->hw.phy_addr; + + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &temp); + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, temp | PHY_RESET); //rst phy + + //clear EEE LED + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x1F, 0xd04); //1F:change page, d04:ExtPage + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x11, 0x00); //EEELCR + // grtnic_PhyWrite(adapter->netdev[i], 0x01, 0x10, 0x207B); // LED config + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x10, 0x0D1B); // LED config + + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x1F, 0x00); //page 0 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0D, 0x07); //1 is phy add, 0d is MACR reg, 7 is device address + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0E, 0x3C); //1 is phy add, 0e is MAADR reg, 0x3C is reg address + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0D, 0x4007); //1 is phy add, 0d is MACR reg, 4007 get data from device add 7 & reg 14 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0E, 0x00); // EEEAR + } + +// else if(card_type == board_904T_GRT_FF || card_type == board_904E_GRT_FF)//FF904T & FF904E +// { +// u16 temp; +// for (i = 0; i < grtnic_ports_max; i++) +// { +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1E, 0x00); //utp_ext_reg +// grtnic_PhyRead (adapter->netdev[0], i, 0x1F, &temp); +// +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1E, 0x00); +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1F, temp | 0x20); //jumbo enable +// +// grtnic_PhyRead (adapter->netdev[0], i, 0x00, &temp); +// grtnic_PhyWrite(adapter->netdev[0], i, 0x00, temp | 0x8000); //sw reset +// } +// +// } + + for (i = 0; i < adapter->num_tx_queues; i++) 
+ { + + tx_ring = adapter->tx_ring[i]; + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); + } + +//----------------------------------------------------------------------------------- + for (i = 0; i < adapter->num_rx_queues; i++) + { + rx_ring = adapter->rx_ring[i]; + + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); + } + + set_bit(__GRTNIC_DOWN, &adapter->state); + +out: + return err; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +/** + * grtnic_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_update_link(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + u32 xphy_status; + + if (!(adapter->flags & GRTNIC_FLAG_NEED_LINK_UPDATE)) + return; + + xphy_status = GRTNIC_READ_REG(hw, XPHY_STATUS, 0); + link_up = (xphy_status & 1) ? 1:0; + link_speed = (xphy_status >> 1) & 0x03; + +// if (link_up) { +// if (hw->phy.media_type == ixgbe_media_type_copper && +// (ixgbe_device_supports_autoneg_fc(hw))) +// ixgbe_setup_fc(hw); +// hw->mac.ops.fc_enable(hw); +// +// } + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + GRTNIC_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~GRTNIC_FLAG_NEED_LINK_UPDATE; + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), adapter->eims_other, 1); //打开相应的中断,user_interrupt + GRTNIC_WRITE_FLUSH(hw); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * grtnic_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_link_is_up(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 link_speed = adapter->link_speed; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + if(adapter->type==1) //copper rtl8211 + { + grtnic_ResetRx(netdev); + grtnic_SetSpeed(netdev, link_speed); + } + grtnic_SetRx(netdev, 1); //start rx + + e_info(drv, "NIC Link is Up\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); +} + +/** + * grtnic_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void grtnic_watchdog_link_is_down(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + grtnic_SetRx(netdev, 0); //stop rx + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); +} + +static bool grtnic_ring_tx_pending(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +/** + * grtnic_watchdog_flush_tx - flush queues on link down + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_flush_tx(struct grtnic_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (grtnic_ring_tx_pending(adapter)) { + /* We've lost 
link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); + set_bit(__GRTNIC_RESET_REQUESTED, &adapter->state); + } + } +} + +/** + * grtnic_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_subtask(struct grtnic_adapter *adapter) +{ + /* if interface is down, removing or resetting, do nothing */ + if (test_bit(__GRTNIC_DOWN, &adapter->state) || + test_bit(__GRTNIC_REMOVING, &adapter->state) || + test_bit(__GRTNIC_RESETTING, &adapter->state)) + return; + + grtnic_watchdog_update_link(adapter); + + if (adapter->link_up) + grtnic_watchdog_link_is_up(adapter); + else + grtnic_watchdog_link_is_down(adapter); + +// grtnic_update_stats(adapter); //要检查 + + grtnic_watchdog_flush_tx(adapter); +} + + +void grtnic_service_event_schedule(struct grtnic_adapter *adapter) +{ + if (!test_bit(__GRTNIC_DOWN, &adapter->state) && + !test_bit(__GRTNIC_REMOVING, &adapter->state) && + !test_and_set_bit(__GRTNIC_SERVICE_SCHED, &adapter->state)) + queue_work(grtnic_wq, &adapter->service_task); +} + +static void grtnic_service_event_complete(struct grtnic_adapter *adapter) +{ + BUG_ON(!test_bit(__GRTNIC_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__GRTNIC_SERVICE_SCHED, &adapter->state); +} + +static void grtnic_remove_adapter(struct grtnic_hw *hw) +{ + struct grtnic_adapter *adapter = hw->back; + + if ((!hw->dma_bar) || (!hw->user_bar)) + return; + hw->dma_bar = NULL; + hw->user_bar = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__GRTNIC_SERVICE_INITED, &adapter->state)) + grtnic_service_event_schedule(adapter); +} + +static u32 grtnic_check_remove(struct grtnic_hw *hw, u32 reg, u8 bar) +{ + u8 __iomem *reg_addr; + u8 __iomem *userbar_reg_addr; + u32 value; + int i; + + reg_addr = bar ? hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + + userbar_reg_addr = READ_ONCE(hw->user_bar); + /* Register read of 0xFFFFFFFF can indicate the adapter has been + * removed, so perform several status register reads to determine if + * the adapter has been removed. + */ + for (i = 0; i < GRTNIC_FAILED_READ_RETRIES; ++i) { + value = readl(userbar_reg_addr + XPHY_STATUS); + if (value != GRTNIC_FAILED_READ_REG) + break; + mdelay(3); + } + + if (value == GRTNIC_FAILED_READ_REG) + grtnic_remove_adapter(hw); + else + value = readl(reg_addr + reg); + + return value; +} + +static u32 grtnic_validate_register_read(struct grtnic_hw *_hw, u32 reg, u8 bar) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct grtnic_adapter *adapter = _hw->back; + + reg_addr = bar ? _hw->dma_bar : _hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + for (i = 0; i < GRTNIC_DEAD_READ_RETRIES; ++i) { + value = readl(reg_addr + reg); + if (value != GRTNIC_DEAD_READ_REG) + break; + } + + if (value == GRTNIC_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +u32 grtnic_read_reg(struct grtnic_hw *hw, u32 reg, u8 bar) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = bar ? 
hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + + value = readl(reg_addr + reg); + if (unlikely(value == GRTNIC_FAILED_READ_REG)) + value = grtnic_check_remove(hw, reg, bar); + if (unlikely(value == GRTNIC_DEAD_READ_REG)) + value = grtnic_validate_register_read(hw, reg, bar); + return value; +} + +/** + * grtnic_service_timer - Timer Call-back + * @t: pointer to timer_list + **/ +static void grtnic_service_timer(struct timer_list *t) +{ + struct grtnic_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & GRTNIC_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + grtnic_service_event_schedule(adapter); +} + +/** + * grtnic_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void grtnic_service_task(struct work_struct *work) +{ + struct grtnic_adapter *adapter = container_of(work, struct grtnic_adapter, service_task); + if (GRTNIC_REMOVED(adapter->hw.dma_bar)) { + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) { + rtnl_lock(); + grtnic_down(adapter); + rtnl_unlock(); + } + grtnic_service_event_complete(adapter); + return; + } + +// ixgbe_reset_subtask(adapter); +// ixgbe_phy_interrupt_subtask(adapter); +// ixgbe_sfp_detection_subtask(adapter); +// ixgbe_sfp_link_config_subtask(adapter); +// ixgbe_check_overtemp_subtask(adapter); + grtnic_watchdog_subtask(adapter); +// ixgbe_check_hang_subtask(adapter); + grtnic_service_event_complete(adapter); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct grtnic_adapter *adapter = NULL; + struct grtnic_hw *hw = NULL; + struct device *dev = &pdev->dev; + static int cards_found; + int err, pci_using_dac; + int csum_tx_mode = 0, csum_rx_mode = 0; + +#ifdef HAVE_TX_MQ + unsigned int indices = MAX_TX_QUEUES; +#endif /* HAVE_TX_MQ */ + bool disable_dev = false; + + u32 coresettings; + u8 mac_addr[6]; + + const struct grt_gigeth_info *ei = grt_gigeth_info_tbl[ent->driver_data]; //根据vidpid来配置对应的driver_data + + dev_info(dev, "adapter PCI probe"); + + // Enable device + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_mem_regions(pdev, DRIVER_NAME); + if (err) { + dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + // Disable ASPM + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif /* HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING */ + + // Enable bus mastering for DMA + pci_set_master(pdev); + +#ifdef HAVE_TX_MQ + indices = min_t(int, 
ei->dma_channel_max, num_online_cpus()); + netdev = alloc_etherdev_mq(sizeof(struct grtnic_adapter), indices); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct grtnic_adapter)); +#endif /* HAVE_TX_MQ */ + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, dev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->dev = dev; + adapter->pdev = pdev; + adapter->func = PCI_FUNC(pdev->devfn); + adapter->ei = ei; + hw = &adapter->hw; + hw->back = adapter; + if(ei->type == board_902T_GRT_FF) + hw->phy_addr = 0x01; + else + hw->phy_addr = 0x00; + + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->flags = 0; + + adapter->type = ei->port_type; //fiber or copper? + adapter->speed = ei->port_speed; + + adapter->rss_queues = 1; +#ifdef HAVE_TX_MQ + adapter->rss_queues = indices; + printk("rss_queues = %d\n", adapter->rss_queues); +#endif + +// adapter->flags |= FLAG_QUEUE_PAIRS; + + // Map BARs + err = grtnic_map_bars(adapter, pdev); + if (err) + { + dev_err(dev, "Failed to map bar"); + err = -EIO; + goto err_ioremap; + } + + grtnic_assign_netdev_ops(netdev); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + adapter->bd_number = cards_found; + + /* setup adapter struct */ + err = grtnic_sw_init(adapter); + if (err) + goto err_sw_init; + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + grtnic_check_options(adapter); //内核加载参数这里设置 + +//reset_hw + coresettings = GRTNIC_READ_REG(hw, ((TARGET_CONFIG<<12) + ADDR_CORESETTINGS*4), 1); //main reason is for reset dma (RESET_LOGIC) + printk("Number of channels:%d\n", ((coresettings>>0) & 0xf)); + printk("Bus interface width:%d\n", ((coresettings>>19) & 0xf)*32); + printk("Bus master enable:%d\n", ((coresettings>>4) & 0x1)); + printk("Negotiated link width:X%d\n", ((coresettings>>5) & 0x3f)); + printk("Negotiated link rate:%d MTs\n", ((coresettings>>11) & 0x3)*2500); + printk("Max downstream payload:%d bytes\n", 128 << ((coresettings>>13) & 0x7)); + printk("Max upstream payload:%d bytes\n", 128 << ((coresettings>>16) & 0x7)); + + if(coresettings==GRTNIC_FAILED_READ_REG) { + e_dev_err("HW Init failed\n"); + goto err_sw_init; + } + + GRTNIC_WRITE_REG(hw, ASIC_RX_FIFO_RST, 0xff, 0); //reset all channel rx fifo data + GRTNIC_WRITE_REG(hw, ASIC_TX_FIFO_RST, 0xff, 0); //reset all channel tx fifo data + + if(adapter->flags & GRTNIC_FLAG_TXCSUM_CAPABLE) csum_tx_mode = 1; + if(adapter->flags & GRTNIC_FLAG_RXCSUM_CAPABLE) csum_rx_mode = 1; + + GRTNIC_WRITE_REG(hw, CSUM_ENABLE, (csum_rx_mode << 1 | csum_tx_mode), 0); //告诉asic, tx checksum offload + GRTNIC_WRITE_REG(hw, MAX_LED_PKT_NUM, (100<<16 | 1), 0); //200 is delay time and 1 is pkt number + + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_GSO; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + +// netdev->flags &= ~IFF_MULTICAST; + + if(csum_tx_mode) + netdev->features |= NETIF_F_HW_CSUM; +#ifdef NETIF_F_RXCSUM + if(csum_rx_mode) + netdev->features |= NETIF_F_RXCSUM; +#endif +#ifdef NETIF_F_RXHASH + netdev->features |= NETIF_F_RXHASH; +#endif /* NETIF_F_RXHASH */ + +#if defined(HAVE_NDO_SET_FEATURES) && !defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) + netdev->hw_features = netdev->features; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = GRTNIC_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + 
ETH_FCS_LEN); +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = GRTNIC_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); +#endif //HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#endif //HAVE_NETDEVICE_MIN_MAX_MTU + + hw->mac.fc.fc_autoneg = false; + hw->mac.fc.current_mode = fc_rx_pause; + + grtnic_GetMacAddress(netdev, mac_addr); + + if (is_valid_ether_addr((unsigned char *)(mac_addr))) + eth_hw_addr_set(netdev, mac_addr); + else + { + memcpy(mac_addr, DEFAULT_ETHER_ADDRESS, netdev->addr_len); + mac_addr[netdev->addr_len-1] = adapter->func; + eth_hw_addr_set(netdev, mac_addr); + grtnic_SetMacAddress(netdev, netdev->dev_addr); //added + } + +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, mac_addr, netdev->addr_len); +#endif + + grtnic_SetMacPauseAddress(netdev, netdev->dev_addr); + + grtnic_SetPause(netdev, 1); //rx pause, tx off + + printk("add=%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->dev_addr[5],netdev->dev_addr[4],netdev->dev_addr[3],netdev->dev_addr[2],netdev->dev_addr[1],netdev->dev_addr[0]); + + grtnic_ResetRx(netdev); + grtnic_SetRx(netdev, 0); //disable rx + grtnic_ResetTx(netdev); + grtnic_SetTx(netdev, 0); //disable tx + + timer_setup(&adapter->service_timer, grtnic_service_timer, 0); + INIT_WORK(&adapter->service_task, grtnic_service_task); + + set_bit(__GRTNIC_SERVICE_INITED, &adapter->state); + clear_bit(__GRTNIC_SERVICE_SCHED, &adapter->state); + + err = grtnic_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + +// err = hw->mac.ops.start_hw(hw); +//主要调用了ixgbe_start_hw_generic:Clear statistics registers & Setup flow control + + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + pci_set_drvdata(pdev, adapter); + err = register_netdev(netdev); + if (err) + goto err_register; + adapter->netdev_registered = true; + + +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + +#endif + + /* power down the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 1); //disable laser; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_CAPABLE) { + ret = dca_add_requester(pci_dev_to_dev(pdev)); + switch (ret) { + case 0: + adapter->flags |= GRTNIC_FLAG_DCA_ENABLED; + grtnic_setup_dca(adapter); + break; + /* -19 is returned from the kernel when no provider is found */ + case -19: + printk("No DCA provider found. 
Please " + "start ioatdma for DCA functionality.\n"); + break; + default: + printk("DCA registration failed: %d\n", ret); + break; + } + } +#endif + + cards_found++; + +#ifdef GRTNIC_PROCFS + if (grtnic_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* IXGBE_PROCFS */ + + // probe complete + return 0; + +err_register: + grtnic_clear_interrupt_scheme(adapter); +err_sw_init: + kfree(adapter->rss_key); + grtnic_free_bars(adapter, pdev); +err_ioremap: + disable_dev = !test_and_set_bit(__GRTNIC_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + + +static void grtnic_pci_remove(struct pci_dev *pdev) +{ + struct grtnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + dev_info(&pdev->dev, "grtnic PCI remove"); + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + + set_bit(__GRTNIC_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_DCA_ENABLED; + dca_remove_requester(pci_dev_to_dev(pdev)); + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + } +#endif /* CONFIG_DCA */ + +#ifdef GRTNIC_PROCFS + grtnic_procfs_exit(adapter); +#endif /* GRTNIC_PROCFS */ + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + grtnic_clear_interrupt_scheme(adapter); + grtnic_free_bars(adapter, pdev); + pci_release_regions(pdev); + kfree(adapter->rss_key); + disable_dev = !test_and_set_bit(__GRTNIC_DISABLED, &adapter->state); + free_netdev(netdev); + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif /* HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING */ + + if (disable_dev) + pci_disable_device(pdev); +} + + +/* + * __grtnic_shutdown is not used when power management + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
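+ *
+ * The helper detaches the netdev, closes it if it was running, tears down
+ * the interrupt scheme and disables the PCI device.  *enable_wake reports
+ * whether wake-on-LAN should keep the device powered; wufc is currently
+ * hard-coded to 0, so it always comes back false.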
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) +static int __grtnic_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct grtnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; +// u32 wufc = adapter->wol; + u32 wufc = 0; +#ifdef CONFIG_PM + int retval = 0; +#endif + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + grtnic_close_suspend(adapter); + + grtnic_clear_interrupt_scheme(adapter); + rtnl_unlock(); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + + *enable_wake = !!wufc; + + if (!test_and_set_bit(__GRTNIC_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + + +#ifndef USE_REBOOT_NOTIFIER +static void grtnic_pci_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __grtnic_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#endif + + +static struct pci_driver grtnic_pci_driver = { + .name = DRIVER_NAME, + .id_table = grtnic_pci_tbl, + .probe = grtnic_pci_probe, + .remove = grtnic_pci_remove, +#ifndef USE_REBOOT_NOTIFIER + .shutdown = grtnic_pci_shutdown, +#endif +}; + +static int __init grtnic_init(void) +{ + int ret; + printk("Beijing GRT(R) NIC Network Driver - %s\n", DRIVER_VERSION); + printk("Copyright(c) 2020-2022 Beijing GRT Corporation.\n"); + + grtnic_wq = create_singlethread_workqueue(DRIVER_NAME); + if (!grtnic_wq) { + pr_err("%s: Failed to create workqueue\n", DRIVER_NAME); + return -ENOMEM; + } + +#ifdef GRTNIC_PROCFS + if (grtnic_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + + ret = pci_register_driver(&grtnic_pci_driver); + if (ret) + { + destroy_workqueue(grtnic_wq); +#ifdef GRTNIC_PROCFS + grtnic_procfs_topdir_exit(); +#endif + return ret; + } + +//#if IS_ENABLED(CONFIG_DCA) +// dca_register_notify(&dca_notifier); +//#endif + + return ret; +} + +static void __exit grtnic_exit(void) +{ +//#if IS_ENABLED(CONFIG_DCA) +// dca_unregister_notify(&dca_notifier); +//#endif + pci_unregister_driver(&grtnic_pci_driver); +#ifdef GRTNIC_PROCFS + grtnic_procfs_topdir_exit(); +#endif + destroy_workqueue(grtnic_wq); +} + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) +static int grtnic_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, void __always_unused *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&grtnic_pci_driver.driver, NULL, &event, + __grtnic_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif + +const struct grt_gigeth_info grt_902eff_info = { + .type = board_902E_GRT_FF, + .dma_channel_max = 1, + .port_type = 0, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_902tff_info = { + .type = board_902T_GRT_FF, + .dma_channel_max = 1, + .port_type = 1, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_901elr_info = { + .type = board_901ELR_GRT_FF, + .dma_channel_max = 1, + .port_type = 0, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_1002eff_info = { + .type = board_1002E_GRT_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info grt_1001eff_info = { + .type = board_1001E_GRT_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info qm_1001eff_info = { + .type = board_1001E_QM_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info grt_1005efx_info = { + .type = board_1005E_GRT_FX, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +module_init(grtnic_init); +module_exit(grtnic_exit); \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_netdev.c b/drivers/net/ethernet/guangruntong/grtnic_netdev.c new file mode 100755 index 00000000000000..368350456f6c08 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_netdev.c @@ -0,0 +1,3451 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#endif +#include +#include + +#include "grtnic.h" +#include "grtnic_nvm.h" +#include "grtnic_macphy.h" + +/* only works for sizes that are powers of 2 */ +#define GRTNIC_ROUNDUP_SIZE(i, size) ( (size) - ((i) & ((size) - 1)) ) + +static void grtnic_clean_tx_ring(struct grtnic_ring *tx_ring); +static void grtnic_clean_rx_ring(struct grtnic_ring *rx_ring); + +#ifdef NETIF_F_RXHASH +static inline void grtnic_rx_hash(struct grtnic_ring *ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + u16 rss_type; + + if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & 0x0f; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (rss_type & 0xc0) ? //tcp or udp + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} +#endif /* NETIF_F_RXHASH */ + +static int grtnic_desc_unused(struct grtnic_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +static inline void grtnic_release_rx_desc(struct grtnic_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
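+ *
+ * The wmb() pairs with the tail (doorbell) write that follows: the
+ * descriptor contents must be globally visible before the device is
+ * told it may fetch them.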
+ */ + wmb(); + writel(val, rx_ring->tail); //rx_ring->tail, 这个地方别忘记设置,desc要在clean_rx_irq里面清0 +} + +///////////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef CONFIG_DISABLE_PACKET_SPLIT +static bool grtnic_alloc_mapped_skb(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *buffer_info) +{ + struct sk_buff *skb = buffer_info->skb; + dma_addr_t dma = buffer_info->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), rx_ring->rx_buffer_len); + + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + buffer_info->skb = skb; + } + + dma = dma_map_single(rx_ring->dev, skb->data, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + buffer_info->dma = dma; + buffer_info->length = rx_ring->rx_buffer_len; + return true; +} + +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + +static inline unsigned int grtnic_rx_offset(struct grtnic_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? GRTNIC_SKB_PAD : 0; +} + +static bool grtnic_alloc_mapped_page(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *buffer_info) +{ + struct page *page = buffer_info->page; + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(grtnic_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, grtnic_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + buffer_info->dma = dma; + buffer_info->page = page; + buffer_info->page_offset = grtnic_rx_offset(rx_ring); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + page_ref_add(page, USHRT_MAX - 1); + buffer_info->pagecnt_bias = USHRT_MAX; +#else + buffer_info->pagecnt_bias = 1; +#endif + rx_ring->rx_stats.alloc_rx_page++; +// buffer_info->length = grtnic_rx_bufsz(rx_ring); +// buffer_info->length = GRTNIC_RX_BUFSZ; //注意,这里告知asic缓冲区大小不是整个page,因为整个page可能有几个缓冲区 + +// printk("offset = %d, length = %d\n", buffer_info->page_offset, buffer_info->length); + + return true; +} +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +///////////////////////////////////////////////////////////////////////////////////////////////////// + +void grtnic_alloc_rx_buffers(struct grtnic_ring *rx_ring, u16 cleaned_count) +{ + union grtnic_rx_desc *rx_desc; + struct grtnic_rx_buffer *buffer_info; + u16 i = rx_ring->next_to_use; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + u16 bufsz; +#endif + + /* nothing to do */ + if (!cleaned_count) + return; 
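+
+	/*
+	 * The refill loop below biases the index by -count so the wrap check
+	 * is a simple "!i": i runs from (next_to_use - count) up towards
+	 * zero, and hitting zero means we are back at descriptor 0 and must
+	 * reload the descriptor and buffer_info pointers.
+	 */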
+ + rx_desc = GRTNIC_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + bufsz = grtnic_rx_bufsz(rx_ring); +#endif + + do { +#ifdef CONFIG_DISABLE_PACKET_SPLIT + if (!grtnic_alloc_mapped_skb(rx_ring, buffer_info)) + break; +#else + if (!grtnic_alloc_mapped_page(rx_ring, buffer_info)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, buffer_info->dma, + buffer_info->page_offset, bufsz, + DMA_FROM_DEVICE); +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + rx_desc->read.src_addr = cpu_to_le64(buffer_info->dma); + rx_desc->read.len_ctl.len = cpu_to_le16(buffer_info->length); +#else + rx_desc->read.src_addr = cpu_to_le64(buffer_info->dma + buffer_info->page_offset); + rx_desc->read.len_ctl.len = cpu_to_le16(bufsz); +#endif + rx_desc->read.len_ctl.desc_num = 0; + rx_desc->read.len_ctl.chl = 0; + rx_desc->read.len_ctl.cmp = 0; + rx_desc->read.len_ctl.sop = 0; + rx_desc->read.len_ctl.eop = 0; + + rx_desc++; + buffer_info++; + i++; + + if (unlikely(!i)) { + rx_desc = GRTNIC_RX_DESC(*rx_ring, 0); + buffer_info = &rx_ring->rx_buffer_info[0]; + i -= rx_ring->count; + } + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + grtnic_release_rx_desc(rx_ring, i); +} + +static inline bool grtnic_container_is_rx(struct grtnic_q_vector *q_vector, struct grtnic_ring_container *rc) +{ + return &q_vector->rx == rc; +} +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void grtnic_update_itr(struct grtnic_q_vector *q_vector, struct grtnic_ring_container *ring_container) +{ + unsigned int itr = ITR_ADAPTIVE_MIN_USECS | ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned long next_update = jiffies; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!ring_container->ring) + return; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, ring_container->next_update)) + goto clear_counts; + + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + + if (grtnic_container_is_rx(q_vector, ring_container)) { + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. 
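+ *
+ * (Worked example: 4 packets totalling 2400 bytes give
+ * avg_wire_size = (2400 + 4 * 24) * 2 = 4992, which already sits
+ * inside the 2560..12800 clamp applied below before the value is
+ * scaled for link speed at adjust_for_speed.)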
+ */ + if (packets && packets < 24 && bytes < 12112) { + itr = ITR_ADAPTIVE_LATENCY; + avg_wire_size = (bytes + packets * 24) * 2; + avg_wire_size = clamp_t(unsigned int, avg_wire_size, 2560, 12800); + goto adjust_for_speed; + } + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + itr = (q_vector->itr >> 2) + ITR_ADAPTIVE_MIN_INC; + if (itr > ITR_ADAPTIVE_MAX_USECS) + itr = ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & ITR_ADAPTIVE_LATENCY; + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = q_vector->itr >> 2; + goto clear_counts; + } + + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = q_vector->itr >> 3; + if (itr < ITR_ADAPTIVE_MIN_USECS) + itr = ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. 
This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + + if (q_vector->adapter->speed == 1) //10G + itr += DIV_ROUND_UP(avg_wire_size, ITR_ADAPTIVE_MIN_INC * 256) * ITR_ADAPTIVE_MIN_INC; + else //1G + itr += DIV_ROUND_UP(avg_wire_size, ITR_ADAPTIVE_MIN_INC * 64) * ITR_ADAPTIVE_MIN_INC; + + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. + */ + if ((itr & ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - ITR_ADAPTIVE_MIN_INC; + +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + + ring_container->total_bytes = 0; + ring_container->total_packets = 0; +} + +void grtnic_write_itr (struct grtnic_q_vector *q_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & MAX_EITR; + + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_ITR*4), (v_idx<<16 | itr_reg), 1); +} + + +static void grtnic_set_itr(struct grtnic_q_vector *q_vector) +{ + u32 new_itr; + + grtnic_update_itr(q_vector, &q_vector->tx); + grtnic_update_itr(q_vector, &q_vector->rx); + + /* use the smallest value of new ITR delay calculations */ + new_itr = min(q_vector->rx.itr, q_vector->tx.itr); + + /* Clear latency flag if set, shift into correct position */ + new_itr &= ITR_ADAPTIVE_MASK_USECS; + new_itr <<= 2; + + if (new_itr != q_vector->itr) { + /* save the algorithm value here */ + q_vector->itr = new_itr; +// printk("new_itr = %d\n", new_itr); + grtnic_write_itr(q_vector); + } +} + +#ifdef CONFIG_DISABLE_PACKET_SPLIT +/** + * ixgbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *grtnic_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = GRTNIC_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + GRTNIC_CB(tail)->head = NULL; + + return head; +} + +/** + * ixgbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via ixgbe_merge_active_tail. 
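+ *
+ * Sketch for a packet spanning three receive buffers: the head skb
+ * ends up with skb_shinfo(head)->frag_list = tail1, tail1->next =
+ * tail2, and GRTNIC_CB(head)->tail tracking tail2 as the still-active
+ * tail; tail1 has already been folded into the head's byte counts,
+ * and tail2 is folded in by grtnic_close_active_frag_list() at EOP.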
+ **/ +static inline void grtnic_add_active_tail(struct sk_buff *head, struct sk_buff *tail) +{ + struct sk_buff *old_tail = GRTNIC_CB(head)->tail; + + if (old_tail) { + grtnic_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + GRTNIC_CB(tail)->head = head; + GRTNIC_CB(head)->tail = tail; +} + +/** + * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool grtnic_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = GRTNIC_CB(head)->tail; + + if (!tail) + return false; + + grtnic_merge_active_tail(tail); + + GRTNIC_CB(head)->tail = NULL; + + return true; +} + +#endif + + +static void grtnic_process_skb_fields(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + struct net_device *netdev = netdev_ring(rx_ring); + u8 TCPCS, UDPCS, IPCS, CSUM_OK, UDP_CSUM_FLAG; + +#ifdef NETIF_F_RXHASH + grtnic_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ + + CSUM_OK = rx_desc->wb.upper.rx_info.csum_ok; + IPCS = rx_desc->wb.upper.rx_info.ipcs; + TCPCS = rx_desc->wb.upper.rx_info.tcpcs; + UDPCS = rx_desc->wb.upper.rx_info.udpcs; + UDP_CSUM_FLAG = rx_desc->wb.upper.rx_info.udp_csum_flag; + +// printk("CSUM_OK=%d, IPCS=%d, TCPCS=%d, UDPCS=%d, UDP_CSUM_FLAG=%d\n", CSUM_OK, IPCS, TCPCS, UDPCS, UDP_CSUM_FLAG); + + if((netdev->features & NETIF_F_RXCSUM) && IPCS) //is ip protocol + { + if((TCPCS & CSUM_OK) || (UDPCS & CSUM_OK & UDP_CSUM_FLAG)) //UDP_CSUM_FLAG means: udp checksum not is 0 + { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + else if(TCPCS || (UDPCS & UDP_CSUM_FLAG)) + { + printk("CSUM_OK=%d, IPCS=%d, TCPCS=%d, UDPCS=%d, UDP_CSUM_FLAG=%d\n", CSUM_OK, IPCS, TCPCS, UDPCS, UDP_CSUM_FLAG); + rx_ring->rx_stats.csum_err++; + } + } + + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); + + skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); +} + + +void grtnic_rx_skb(struct grtnic_q_vector *q_vector, + struct grtnic_ring *rx_ring, + union grtnic_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (grtnic_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + + napi_gro_receive(&q_vector->napi, skb); + +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif +} + + +static bool grtnic_is_non_eop(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#endif + + u32 ntc = rx_ring->next_to_clean + 1; + + rx_desc->wb.upper.len_ctl.cmp = 0; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(GRTNIC_RX_DESC(*rx_ring, ntc)); + + if (likely(rx_desc->wb.upper.len_ctl.eop)) + return false; + + /* place skb in next buffer to be received */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + grtnic_add_active_tail(skb, next_skb); + GRTNIC_CB(next_skb)->head = skb; +#else + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef CONFIG_DISABLE_PACKET_SPLIT +/* grtnic_clean_rx_irq -- * legacy */ +static int grtnic_clean_rx_irq(struct grtnic_q_vector *q_vector, int budget) +{ + struct grtnic_ring *rx_ring = q_vector->rx.ring; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +//#if IS_ENABLED(CONFIG_FCOE) +// int ddp_bytes; +// unsigned int mss = 0; +//#endif /* CONFIG_FCOE */ + u16 len = 0; + u16 cleaned_count = grtnic_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + struct grtnic_rx_buffer *rx_buffer; + union grtnic_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= GRTNIC_RX_BUFFER_WRITE) { + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = GRTNIC_RX_DESC(*rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!rx_desc->wb.upper.len_ctl.cmp) + break; + +// printk("rx len = %d, desc_num = %d, chl = %d, cmp = %d, rs = %d, irq = %d, eop = %d, sop = %d\n", rx_desc->len_ctl.len, +// rx_desc->len_ctl.desc_num,rx_desc->len_ctl.chl,rx_desc->len_ctl.cmp,rx_desc->len_ctl.rs,rx_desc->len_ctl.irq, +// rx_desc->len_ctl.eop,rx_desc->len_ctl.sop); + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.len_ctl.len); + /* pull the header of the skb in */ + __skb_put(skb, len); + +// printk("rx len = %d\n", len); + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. 
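+ * (For a frame spanning several descriptors the first buffer's
+ * mapping is parked in GRTNIC_CB(skb)->dma, the tail buffers are
+ * unmapped as they are merged below, and the parked mapping is
+ * released once the EOP descriptor is processed.)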
Only unmap it when EOP is reached + */ + if (!GRTNIC_CB(skb)->head) { + GRTNIC_CB(skb)->dma = rx_buffer->dma; + } else { + skb = grtnic_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, rx_buffer->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (grtnic_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, GRTNIC_CB(skb)->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + GRTNIC_CB(skb)->dma = 0; + + if (grtnic_close_active_frag_list(skb) && !GRTNIC_CB(skb)->append_cnt) { + /* if we got here without RSC the packet is invalid */ + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + grtnic_process_skb_fields(rx_ring, rx_desc, skb); + + grtnic_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_rx_packets; +} + +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + +static void grtnic_reuse_rx_page(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *old_buff) +{ + struct grtnic_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool grtnic_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool grtnic_can_reuse_rx_page(struct grtnic_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(grtnic_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define GRTNIC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - GRTNIC_RXBUFFER_3K) + if (rx_buffer->page_offset > GRTNIC_LAST_OFFSET) + return false; +#endif + +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
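+ *
+ * (Illustration: grtnic_alloc_mapped_page() seeds the page with
+ * page_ref_add(page, USHRT_MAX - 1) and pagecnt_bias = USHRT_MAX, so
+ * refcount minus bias stays at 1 while the driver is the only owner.
+ * Each buffer handed up to the stack costs one bias decrement, and
+ * only when the bias finally reaches 1 do we pay for one bulk
+ * refcount update here instead of an atomic operation per frame.)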
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } +#else + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif + + return true; +} + +static void grtnic_add_rx_frag(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static void grtnic_dma_sync_frag(struct grtnic_ring *rx_ring, struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(GRTNIC_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, GRTNIC_CB(skb)->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + GRTNIC_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + GRTNIC_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + + +/////////////////////////////////////////////////////////////// + +static struct grtnic_rx_buffer *grtnic_get_rx_buffer(struct grtnic_ring *rx_ring, + union grtnic_rx_desc *rx_desc, struct sk_buff **skb, const unsigned int size) +{ + struct grtnic_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!likely(rx_desc->wb.upper.len_ctl.eop)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + grtnic_dma_sync_frag(rx_ring, *skb); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void grtnic_put_rx_buffer(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *rx_buffer, struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + if (grtnic_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + grtnic_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (!IS_ERR(skb) && GRTNIC_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + GRTNIC_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + } + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *grtnic_construct_skb(struct grtnic_ring *rx_ring, + struct grtnic_rx_buffer *rx_buffer, + union grtnic_rx_desc *rx_desc, + unsigned int size) +{ + + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, GRTNIC_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + if (size > GRTNIC_RX_HDR_SIZE) { + if (!likely(rx_desc->wb.upper.len_ctl.eop)) + GRTNIC_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *grtnic_build_skb(struct grtnic_ring *rx_ring, + struct grtnic_rx_buffer *rx_buffer, + union grtnic_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - GRTNIC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, GRTNIC_SKB_PAD); + 
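+ /*
+ * Resulting layout of the chunk handed to build_skb() above (a
+ * sketch for the PAGE_SIZE < 8192 case, where truesize is half a
+ * page):
+ *
+ *   [ GRTNIC_SKB_PAD headroom | packet data | tailroom | skb_shared_info ]
+ *
+ * page_offset is XOR-flipped by truesize further down so the other
+ * half of the page can be handed out on the next refill.
+ */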
__skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!likely(rx_desc->wb.upper.len_ctl.eop)) + GRTNIC_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +static void grtnic_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, GRTNIC_RX_HDR_SIZE); +// pull_len = eth_get_headlen(va, GRTNIC_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool grtnic_cleanup_headers(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + grtnic_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/* grtnic_clean_rx_irq -- * packet split */ +static int grtnic_clean_rx_irq(struct grtnic_q_vector *q_vector, int budget) +{ + struct grtnic_ring *rx_ring = q_vector->rx.ring; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +//#if IS_ENABLED(CONFIG_FCOE) +// int ddp_bytes; +// unsigned int mss = 0; +//#endif /* CONFIG_FCOE */ + u16 cleaned_count = grtnic_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + union grtnic_rx_desc *rx_desc; + struct grtnic_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= GRTNIC_RX_BUFFER_WRITE) { + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ring->next_to_clean); + if (!rx_desc->wb.upper.len_ctl.cmp) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + size = le16_to_cpu(rx_desc->wb.upper.len_ctl.len); + rx_buffer = grtnic_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + /* retrieve a buffer from the ring */ + if (skb) { + grtnic_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = grtnic_build_skb(rx_ring, rx_buffer, rx_desc, size); +#endif + } else { + skb = grtnic_construct_skb(rx_ring, rx_buffer, rx_desc, size); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + grtnic_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (grtnic_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if 
(grtnic_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + grtnic_process_skb_fields(rx_ring, rx_desc, skb); + + grtnic_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int grtnic_busy_poll_recv(struct napi_struct *napi) +{ + struct grtnic_q_vector *q_vector = + container_of(napi, struct grtnic_q_vector, napi); + struct grtnic_adapter *adapter = q_vector->adapter; + int found = 0; + + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!grtnic_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + found = grtnic_clean_rx_irq(q_vector, 4); +#ifdef BP_EXTENDED_STATS + if (found) + q_vector->rx.ring->stats.cleaned += found; + else + q_vector->rx.ring->stats.misses++; +#endif +// if (found) +// break; + + grtnic_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ + +static bool grtnic_clean_tx_irq_reg(struct grtnic_q_vector *q_vector, int napi_budget) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_ring *tx_ring = q_vector->tx.ring; + struct grtnic_tx_buffer *tx_buffer; + union grtnic_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + i -= tx_ring->count; + + do { + union grtnic_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + if (!eop_desc->wb.len_ctl.cmp) + break; + +// printk("tx len = %d, desc_num = %d, chl = %d, cmp = %d, rs = %d, irq = %d, eop = %d, sop = %d\n", tx_desc->len_ctl.len, +// tx_desc->len_ctl.desc_num,tx_desc->len_ctl.chl,tx_desc->len_ctl.cmp,tx_desc->len_ctl.rs,tx_desc->len_ctl.irq, +// tx_desc->len_ctl.eop,tx_desc->len_ctl.sop); + + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + 
dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + +// printk("next_to_clean = %d\n", i); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && + (grtnic_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) + && !test_bit(__GRTNIC_DOWN, &q_vector->adapter->state)) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !test_bit(__GRTNIC_DOWN, &q_vector->adapter->state)) { + netif_wake_queue(netdev_ring(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#endif + } + + return !!budget; +} + + +/** + * grtnic_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +int grtnic_poll(struct napi_struct *napi, int budget) +{ + struct grtnic_q_vector *q_vector = container_of(napi, struct grtnic_q_vector, napi); + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_hw *hw = &adapter->hw; + int work_done = 0; + bool clean_complete = true; + u32 var; + +// bool clean_complete = true; +// int work_done = 0; +// int cleaned = 0; + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + grtnic_update_dca(q_vector); +#endif /* CONFIG_DCA */ + + + if (q_vector->tx.ring) + { + if(!grtnic_clean_tx_irq_reg(q_vector, budget)) + clean_complete = false; + } + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !grtnic_qv_lock_napi(q_vector)) + return budget; +#else + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; +#endif + + if (q_vector->rx.ring) + { + int cleaned = grtnic_clean_rx_irq(q_vector, budget); + work_done += cleaned; + + if (cleaned >= budget) + clean_complete = false; + } + + +#ifdef HAVE_NDO_BUSY_POLL + grtnic_qv_unlock_napi(q_vector); +#endif +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; +#endif + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* all work done, exit the polling mode */ + if (likely(napi_complete_done(napi, work_done))) { + if (adapter->rx_itr_setting == 1) + grtnic_set_itr(q_vector); + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + { + if 
(adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + var = q_vector->eims_value; + else + var = ~0; + + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), var, 1); + } + } + return min(work_done, budget - 1); +} + + +static void grtnic_trigger_lsc(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), adapter->eims_other, 1); //trigger user interrupt +} +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +int grtnic_setup_tx_resources(struct grtnic_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int node = -1; + int size; + + size = sizeof(struct grtnic_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + node = tx_ring->q_vector->node; + + tx_ring->tx_buffer_info = vmalloc_node(size, node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err_tx_buffer; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union grtnic_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err_tx_ring_dma; + + + set_dev_node(dev, node); + tx_ring->desc_wb = dma_alloc_coherent(dev, sizeof(struct grtnic_desc_wb), &tx_ring->desc_wb_dma, GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc_wb) + tx_ring->desc_wb = dma_alloc_coherent(dev, sizeof(struct grtnic_desc_wb), &tx_ring->desc_wb_dma, GFP_KERNEL); + + if (!tx_ring->desc_wb) + goto err_tx_ring_wb; + + ((struct grtnic_desc_wb *) tx_ring->desc_wb)->desc_hw_ptr = 0; + +// tx_ring->next_to_use = 0; //检查一下这里,其他地方设置了,这里就不需要了 +// tx_ring->next_to_clean = 0; +// +//#ifndef CONFIG_DISABLE_PACKET_SPLIT +// tx_ring->next_to_alloc = 0; +//#endif + + return 0; +err_tx_ring_wb: + dma_free_coherent(dev, tx_ring->size, tx_ring->desc, tx_ring->dma); +err_tx_ring_dma: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; +err_tx_buffer: + printk("Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +int grtnic_setup_rx_resources(struct grtnic_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int node = -1; + int size; + + size = sizeof(struct grtnic_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + node = rx_ring->q_vector->node; + + rx_ring->rx_buffer_info = vmalloc_node(size, node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err_rx_buffer; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union grtnic_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err_rx_ring_dma; + +// rx_ring->next_to_clean = 0; //检查一下这里,其他地方设置了,这里就不需要了 +// rx_ring->next_to_use = 0; +// +//#ifndef CONFIG_DISABLE_PACKET_SPLIT +// rx_ring->next_to_alloc = 0; +//#endif + + return 0; + +err_rx_ring_dma: + vfree(rx_ring->rx_buffer_info); + 
rx_ring->rx_buffer_info = NULL; +err_rx_buffer: + printk("Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + + +void grtnic_free_tx_resources(struct grtnic_ring *tx_ring) +{ + grtnic_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + + dma_free_coherent(tx_ring->dev, sizeof(struct grtnic_desc_wb), + tx_ring->desc_wb, tx_ring->desc_wb_dma); + tx_ring->desc = NULL; +} + +void grtnic_free_rx_resources(struct grtnic_ring *rx_ring) +{ + grtnic_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * grtnic_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int grtnic_setup_all_tx_resources(struct grtnic_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + + + err = grtnic_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + grtnic_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * grtnic_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
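+ * (In this driver the error path below already rewinds the rings that
+ * were set up before the failure, and grtnic_open() frees the Tx
+ * rings as well if Rx setup fails.)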
+ * + * Return 0 on success, negative on failure + **/ +static int grtnic_setup_all_rx_resources(struct grtnic_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = grtnic_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; + +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + grtnic_free_rx_resources(adapter->rx_ring[i]); + return err; +} + + +/** + * grtnic_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void grtnic_free_all_tx_resources(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + grtnic_free_tx_resources(adapter->tx_ring[i]); +} + + +/** + * grtnic_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void grtnic_free_all_rx_resources(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + grtnic_free_rx_resources(adapter->rx_ring[i]); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +/** + * grtnic_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. + **/ +void grtnic_configure_tx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 w; + u32 txdctl = (1u << 25); /* LWTHRESH */ + u8 reg_idx = ring->reg_idx; + + /* flush pending descriptor writebacks to memory */ +// GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (TX_INT_DELAY | GRTNIC_TIDV_FPD), 1); + /* execute the writes immediately */ + GRTNIC_WRITE_FLUSH(hw); + + /* write lower 32-bit of bus address of transfer first descriptor */ + w = cpu_to_le32(PCI_DMA_L(ring->dma)); + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_ADDRLO*4), w, 1); + /* write upper 32-bit of bus address of transfer first descriptor */ + w = cpu_to_le32(PCI_DMA_H(ring->dma)); + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_ADDRHI*4), w, 1); + /* write lower 32-bit of bus address of desc write back address*/ + w = cpu_to_le32(PCI_DMA_L(ring->desc_wb_dma)); + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_WBADDRLO*4), w, 1); + /* write upper 32-bit of bus address of desc write back address*/ + w = cpu_to_le32(PCI_DMA_H(ring->desc_wb_dma)); + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_WBADDRHI*4), w, 1); + + /* setup max SG num */ + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_MAXNUM*4), ring->count, 1); +// /* Set the Tx Interrupt Delay register TIDV */ 前面为了flush,已经设置过了,这里就不用了 + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), TX_INT_DELAY, 1); +// write_register(tx_int_delay, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4); + + ring->tail = hw->dma_bar + (TARGET_H2C<<12) + (reg_idx <<8) + (ADDR_SG_SWPT*4); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause 
false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
+ *
+ * In order to avoid issues WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on chip descriptors, which is
+ * currently 40.
+ */
+ if (!ring->q_vector || (ring->q_vector->itr < GRTNIC_100K_ITR))
+ txdctl |= (1 << 16); /* WTHRESH = 1 */
+ else
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /*
+ * Setting PTHRESH to 32 both improves performance
+ * and avoids a TX hang with DFP enabled
+ */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ //PTHRESH=32, HTHRESH=1, WTHRESH=1, LWTHRESH=1: in prefetch mode descriptor fetch requests are only issued while everything else on the bus is idle
+// write_register(GRTNIC_TXDCTL_DMA_BURST_ENABLE, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4);
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), txdctl, 1);
+// GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), GRTNIC_TXDCTL_DMA_BURST_ENABLE, 1);
+
+ /* initialize XPS */
+ if (!test_and_set_bit(__GRTNIC_TX_XPS_INIT_DONE, &ring->state)) {
+ struct grtnic_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev, get_cpu_mask(reg_idx % adapter->rss_queues), ring->queue_index);
+// netif_set_xps_queue(adapter->netdev, &q_vector->affinity_mask, ring->queue_index);
+ }
+
+ clear_bit(__GRTNIC_HANG_CHECK_ARMED, &ring->state);
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct grtnic_tx_buffer) * ring->count);
+
+ /* TX dma engine start */
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x01, 1);
+}
+
+/**
+ * grtnic_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
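+ *
+ * Note on register addressing: the writes in grtnic_configure_tx_ring()
+ * above follow the pattern used throughout this driver, where
+ * (TARGET_H2C << 12) selects the transmit (host-to-card) DMA engine,
+ * (reg_idx << 8) selects that queue's register window, and
+ * ADDR_xxx * 4 is the byte offset of a 32-bit register inside the
+ * window; the Rx path below uses TARGET_C2H in the same way.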
+ **/
+
+static void grtnic_configure_tx(struct grtnic_adapter *adapter)
+{
+ u32 i;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ grtnic_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+
+void grtnic_configure_rx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring)
+{
+ struct grtnic_hw *hw = &adapter->hw;
+ u32 w;
+ u32 rxdctl = (1u << 25); /* LWTHRESH */
+ u8 reg_idx = ring->reg_idx;
+
+ union grtnic_rx_desc *rx_desc;
+
+ /* flush pending descriptor writebacks to memory */
+// GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (RX_INT_DELAY | GRTNIC_RDTR_FPD), 1);
+ /* execute the writes immediately */
+ GRTNIC_WRITE_FLUSH(hw);
+
+ /* write lower 32-bit of bus address of transfer first descriptor */
+ w = cpu_to_le32(PCI_DMA_L(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_ADDRLO*4), w, 1);
+ /* write upper 32-bit of bus address of transfer first descriptor */
+ w = cpu_to_le32(PCI_DMA_H(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_ADDRHI*4), w, 1);
+
+ /* setup max SG num */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_MAXNUM*4), ring->count, 1);
+// /* set the Receive Delay Timer Register RDTR #define BURST_RDTR 0x20 */ /* already written above as part of the flush, so it is not repeated here */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), RX_INT_DELAY, 1);
+// write_register(rx_int_delay, adapter->dma_bar+ (TARGET_C2H<<12) + (channel<<8) + ADDR_INT_DELAY*4);
+
+ ring->tail = hw->dma_bar + (TARGET_C2H<<12) + (reg_idx <<8) + (ADDR_SG_SWPT*4);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+#ifndef CONFIG_DISABLE_PACKET_SPLIT
+ ring->next_to_alloc = 0;
+#endif
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct grtnic_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = GRTNIC_RX_DESC(*ring, 0);
+ rx_desc->wb.upper.len_ctl.cmp = 0;
+
+ rxdctl = GRTNIC_RXDCTL_DMA_BURST_ENABLE;
+ //PTHRESH=32, HTHRESH=4, WTHRESH=4, LWTHRESH=1: unlike the Intel behaviour, once the number of available descriptors drops below LWTHRESH the descriptor fetch gets the highest priority and is issued immediately regardless of whether the bus is busy; otherwise descriptors are prefetched at the lowest priority
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), rxdctl, 1);
+
+ /* RX dma engine start */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x01, 1);
+
+ grtnic_alloc_rx_buffers(ring, grtnic_desc_unused(ring));
+}
+
+
+static void grtnic_set_rx_buffer_len(struct grtnic_adapter *adapter)
+{
+ struct grtnic_ring *rx_ring;
+ int i;
+
+#if defined(CONFIG_DISABLE_PACKET_SPLIT) || (defined (HAVE_SWIOTLB_SKIP_CPU_SYNC) && (PAGE_SIZE < 8192))
+ int max_frame = adapter->max_frame_size;
+#endif
+
+#ifdef CONFIG_DISABLE_PACKET_SPLIT
+ max_frame += VLAN_HLEN;
+ if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+ max_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ max_frame = ALIGN(max_frame, 1024);
+#endif
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring = adapter->rx_ring[i];
+
+#ifndef CONFIG_DISABLE_PACKET_SPLIT
+ clear_bit(__GRTNIC_RX_3K_BUFFER, &rx_ring->state);
+ clear_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+ rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_2K;
+
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+ set_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+ if (GRTNIC_2K_TOO_SMALL_WITH_PADDING || (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ {
+ set_bit(__GRTNIC_RX_3K_BUFFER, &rx_ring->state);
+ rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_3K;
+ }
+
+#endif /* PAGE_SIZE < 8192 */
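+ /*
+ * Illustration of the selection above, assuming 4K pages and
+ * build_skb: each 2K half-page has to fit GRTNIC_SKB_PAD headroom,
+ * the frame and the trailing skb_shared_info, so a standard
+ * 1500-byte MTU stays on GRTNIC_RXBUFFER_2K, while jumbo frames (or
+ * a padding layout where 2K is too small) move the ring to
+ * GRTNIC_RXBUFFER_3K buffers.
+ */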
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + rx_ring->rx_buffer_len = max_frame; +#endif /*!CONFIG_DISABLE_PACKET_SPLIT*/ + } +} + +/** + * grtnic_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void grtnic_configure_rx(struct grtnic_adapter *adapter) +{ + int i; + + /* Program registers for the distribution of queues */ + grtnic_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + grtnic_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + grtnic_configure_rx_ring(adapter, adapter->rx_ring[i]); + + /* enable all receives */ + grtnic_SetRx(adapter->netdev, 1); //start rx +} + + +static void grtnic_configure(struct grtnic_adapter *adapter) +{ + +//#if IS_ENABLED(CONFIG_DCA) +// /* configure DCA */ +// if (adapter->flags & FLAG_DCA_CAPABLE) +// grtnic_setup_dca(adapter); +//#endif + + grtnic_configure_tx(adapter); + grtnic_configure_rx(adapter); +} + +static void grtnic_up_complete(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 temp; + +// ixgbe_get_hw_control(adapter); +// ixgbe_setup_gpie(adapter); + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + grtnic_configure_msix(adapter); + else + grtnic_configure_msi_and_legacy(adapter); + + if(adapter->ei->type == board_902T_GRT_FF) + { + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &temp); //prtad_reg + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, temp | PHY_RESET); //rst phy + } + else + { + /* enable the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x00, 0); //enable laser; + } + + smp_mb__before_atomic(); + clear_bit(__GRTNIC_DOWN, &adapter->state); + grtnic_napi_enable_all(adapter); +//#ifndef IXGBE_NO_LLI +// grtnic_configure_lli(adapter); +//#endif + + /* clear any pending interrupts, may auto mask */ + GRTNIC_READ_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + grtnic_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= GRTNIC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + +// +// ixgbe_clear_vf_stats_counters(adapter); +// /* Set PF Reset Done bit so PF/VF Mail Ops can work */ +// ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +// ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; +// IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +// +// /* update setting rx tx for all active vfs */ +// ixgbe_set_all_vfs(adapter); +} + +void grtnic_reset(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + GRTNIC_READ_REG(hw, ((TARGET_CONFIG<<12) + ADDR_FUNC_RST*4), 1); //function reset; +} + +int grtnic_open(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__GRTNIC_TESTING, &adapter->state)) + return -EBUSY; + + grtnic_SetRx(netdev, 0); //stop rx + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = grtnic_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = 
grtnic_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + grtnic_configure(adapter); + + err = grtnic_request_irq(adapter); + if (err) + goto err_req_irq; + + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + + grtnic_up_complete(adapter); + + grtnic_SetTx(netdev, 1); //start tx + grtnic_trigger_lsc(adapter); //Fire a link status change interrupt to start the watchdog. + + return GRTNIC_SUCCESS; + + +err_set_queues: + grtnic_free_irq(adapter); +err_req_irq: + grtnic_free_all_rx_resources(adapter); +err_setup_rx: + grtnic_free_all_tx_resources(adapter); +err_setup_tx: + grtnic_reset(adapter); + + return err; +} +/////////////////////////////////////////////////////////////////////////////// + + +void grtnic_disable_rx_queue(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i; + + /* disable receives */ + grtnic_SetRx(netdev, 0); //stop rx + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + /* flush pending descriptor writebacks to memory */ + GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (RX_INT_DELAY | GRTNIC_RDTR_FPD), 1); + /* channel stop */ + GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x00, 1); + } +} + +void grtnic_disable_tx_queue(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + int i; + + /* disable all enabled Tx queues */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + /* flush pending descriptor writebacks to memory */ + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (TX_INT_DELAY | GRTNIC_TIDV_FPD), 1); + /* channel stop */ + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x00, 1); + } +} + +/** + * grtnic_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void grtnic_clean_tx_ring(struct grtnic_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct grtnic_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + unsigned int size; + + + while (i != tx_ring->next_to_use) { + union grtnic_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue 
*/ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct grtnic_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + + +/** + * grtnic_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void grtnic_clean_rx_ring(struct grtnic_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct grtnic_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + unsigned int size; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + + /* Free all the Rx ring sk_buffs */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + while (i != rx_ring->next_to_use) { +#else + while (i != rx_ring->next_to_alloc) { +#endif + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + if (GRTNIC_CB(skb)->page_released) + dma_unmap_page_attrs(rx_ring->dev, + GRTNIC_CB(skb)->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif +#else + /* We need to clean up RSC frag lists */ + skb = grtnic_merge_active_tail(skb); + if (grtnic_close_active_frag_list(skb)) + dma_unmap_single(rx_ring->dev, GRTNIC_CB(skb)->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + GRTNIC_CB(skb)->dma = 0; +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
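+ * (The page was mapped with DMA_ATTR_SKIP_CPU_SYNC where supported, so only
+ * the region the hardware may have written is synced back here before the
+ * page is unmapped and its remaining references are dropped.)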
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + grtnic_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + if (rx_buffer->dma) { + dma_unmap_single(rx_ring->dev, rx_buffer->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + size = sizeof(struct grtnic_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; +#endif + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * grtnic_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void grtnic_clean_all_tx_rings(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + grtnic_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * grtnic_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void grtnic_clean_all_rx_rings(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + grtnic_clean_rx_ring(adapter->rx_ring[i]); +} + + +void grtnic_down(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 temp; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__GRTNIC_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* Shut off incoming Tx traffic */ + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + + /* Disable Rx */ + grtnic_disable_rx_queue(adapter); + + grtnic_irq_disable(adapter); + + grtnic_napi_disable_all(adapter); + + clear_bit(__GRTNIC_RESET_REQUESTED, &adapter->state); + adapter->flags &= ~GRTNIC_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + /* disable transmits in the hardware now that interrupts are off */ + grtnic_disable_tx_queue(adapter); + +#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) +#endif + grtnic_reset(adapter); + + if(adapter->ei->type == board_902T_GRT_FF) + { + grtnic_PhyRead(netdev, phy_addr, 0x00, &temp); //prtad_reg + grtnic_PhyWrite(netdev, phy_addr, 0x00, temp | PHY_POWER_DOWN); //power down + } + else + { + /* power down the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 0); //disable laser; + } + + grtnic_clean_all_tx_rings(adapter); + grtnic_clean_all_rx_rings(adapter); +} + + +void grtnic_up(struct grtnic_adapter *adapter) +{ + + /* hardware has been reset, we need to reload some things */ + grtnic_configure(adapter); + + grtnic_up_complete(adapter); +} + +void grtnic_reinit_locked(struct grtnic_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + 
netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__GRTNIC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + grtnic_down(adapter); + grtnic_up(adapter); + clear_bit(__GRTNIC_RESETTING, &adapter->state); +} + +void grtnic_do_reset(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + grtnic_reinit_locked(adapter); + else + grtnic_reset(adapter); +} + +/** + * grtnic_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +void grtnic_close_suspend(struct grtnic_adapter *adapter) +{ + grtnic_down(adapter); + grtnic_free_irq(adapter); + + grtnic_free_all_rx_resources(adapter); + grtnic_free_all_tx_resources(adapter); +} + + +int grtnic_close(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + if (netif_device_present(netdev)) + grtnic_close_suspend(adapter); + + return 0; +} + +static int __grtnic_maybe_stop_tx(struct grtnic_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(grtnic_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int grtnic_maybe_stop_tx(struct grtnic_ring *tx_ring, u16 size) +{ + if (likely(grtnic_desc_unused(tx_ring) >= size)) + return 0; + + return __grtnic_maybe_stop_tx(tx_ring, size); +} + + +netdev_tx_t grtnic_xmit_frame_ring (struct sk_buff *skb, + struct grtnic_adapter __maybe_unused *adapter, + struct grtnic_ring *tx_ring) + +{ + struct grtnic_tx_buffer *first, *tx_buffer; + union grtnic_tx_desc *tx_desc; + unsigned int i, f; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + unsigned int csum_info = 0; + +//////////////////////////////////////////////////////// + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); + + if (grtnic_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + i = tx_ring->next_to_use; + first = &tx_ring->tx_buffer_info[i]; + + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; +/////////////////////////////////////////////////////////////////// + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + { + unsigned int csum_start = skb_checksum_start_offset(skb); + unsigned int csum_offset = skb->csum_offset; + + if (csum_start > 255 || csum_offset > 127) + { + if (skb_checksum_help(skb)) //soft calc csum + 
csum_info = 0; //disable hw csum + } + else + { + csum_info = (csum_offset << 8) | (csum_start); + } + } + else + { + csum_info = 0; + } + + tx_desc->read.len_ctl.sop = 1; + tx_desc->read.tx_info.csum_info = csum_info; + +////////////////////////////////////////////////////////////////////////////// + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + tx_buffer->tx_flags = 0; + + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.src_addr = cpu_to_le64(dma); + + while (unlikely(size > GRTNIC_MAX_DATA_PER_TXD)) { + tx_desc->read.len_ctl.len = cpu_to_le32(GRTNIC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + i = 0; + } + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); +// tx_desc->read.olinfo_status = 0; + + dma += GRTNIC_MAX_DATA_PER_TXD; + size -= GRTNIC_MAX_DATA_PER_TXD; + + tx_desc->read.src_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.len_ctl.len = cpu_to_le32(size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + i = 0; + } + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); +// tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->read.len_ctl.eop = 1; + tx_desc->read.len_ctl.irq = 1; + tx_desc->read.len_ctl.rs = 1; + tx_desc->read.len_ctl.len = cpu_to_le32(size); + + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif + + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + grtnic_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + +// printk("next_to_use = %d\n", i); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. 
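+ * When SPIN_UNLOCK_IMPLIES_MMIOWB is defined the call below is compiled out,
+ * since on those kernels spin_unlock() is expected to provide that ordering.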
+ */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } + + return NETDEV_TX_OK; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return NETDEV_TX_OK; /* skb was freed above; report the frame as handled */ +} + +static netdev_tx_t grtnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + + return grtnic_xmit_frame_ring(skb, adapter, tx_ring); + +} + +static void grtnic_check_lsc(struct grtnic_adapter *adapter) +{ + adapter->lsc_int++; +// printk("lsc = %d\n", adapter->lsc_int); + adapter->flags |= GRTNIC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + grtnic_service_event_schedule(adapter); +} + + +irqreturn_t grtnic_isr (int __always_unused irq, void *data) +{ + struct grtnic_adapter *adapter = data; + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_q_vector *q_vector = adapter->q_vector[0]; + u32 irq_vector; + + /* read ICR disables interrupts using IAM */ + irq_vector = GRTNIC_READ_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + + if (!(adapter->flags & GRTNIC_FLAG_MSI_CAPABLE)) //legacy int + { + if(!(irq_vector & (1<<31))) + return IRQ_NONE; /* Not our interrupt */ + } + + if (irq_vector & adapter->eims_other) //link status change + grtnic_check_lsc(adapter); + + else if (((irq_vector & 0x7FFFFFFF) & ~(adapter->eims_other)) == 0) + { + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), ~0, 1); //re-enable all interrupts + goto exit_int; + } + + /* would disable interrupts here but EIAM disabled it */ + napi_schedule_irqoff(&q_vector->napi); + +exit_int: + return IRQ_HANDLED; +} + +irqreturn_t grtnic_msix_other(int __always_unused irq, void *data) +{ + struct grtnic_adapter *adapter = data; + struct grtnic_hw *hw = &adapter->hw; + + grtnic_check_lsc(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), adapter->eims_other, 1); //re-enable the corresponding (user/other) interrupt + + return IRQ_HANDLED; +} + +irqreturn_t grtnic_msix_ring(int __always_unused irq, void *data) +{ + struct grtnic_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_mdio_read(struct net_device *netdev, int prtad, int devad, + u16 addr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u16 value; + + if (prtad
!= hw->phy_addr) + return -EINVAL; + + if(adapter->speed) //10G + { + grtnic_SetPhyAddr(netdev, prtad, devad, addr); //only for 10G phy + grtnic_PhyRead(netdev, prtad, devad, &value); + } + else + { + grtnic_PhyRead(netdev, prtad, addr, &value); + } + + return value; +} + +static int grtnic_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + if (prtad != hw->phy_addr) + return -EINVAL; + + if(adapter->speed) //10G + { + grtnic_SetPhyAddr(netdev, prtad, devad, addr); //only for 10G phy + grtnic_PhyWrite(netdev, prtad, devad, value); + } + else + { + grtnic_PhyWrite(netdev, prtad, addr, value); + } + return 0; +} + +static int grtnic_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = grtnic_mdio_read(netdev, prtad, devad, mii->reg_num); + if (ret < 0) + return ret; + mii->val_out = ret; + return 0; + } else { + return grtnic_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int grtnic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + return grtnic_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_set_mac(struct net_device *netdev, void *p) +{ +// struct xdmanet_port *xdmanet_port = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return (-EADDRNOTAVAIL); +// memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); + + grtnic_SetMacAddress(netdev, netdev->dev_addr); //added + + grtnic_SetMacPauseAddress(netdev, addr->sa_data); + + write_flash_macaddr(netdev); + + return 0; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); +//#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +//#endif + +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > GRTNIC_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + +#endif + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + adapter->max_frame_size = max_frame; + + grtnic_SetMaxFrameLen(netdev, max_frame); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + grtnic_reinit_locked(adapter); + + return 0; +} + + +static u32 hash_mc_addr(struct net_device *netdev, u8 *mc_addr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + u32 hash_value, hash_mask; + u8 bit_shift = 0; + +// printk("add=%02x:%02x:%02x:%02x:%02x:%02x\n", mc_addr[5],mc_addr[4],mc_addr[3],mc_addr[2],mc_addr[1],mc_addr[0]); + + /* Register count multiplied by bits per register */ + hash_mask = (mac->mta_reg_count * 32) - 1; + + /* For a mc_filter_type 
of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ +/* switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + }*/ + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +void update_mc_addr_list(struct net_device *netdev, u8 *mc_addr_list, u32 mc_addr_count) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + u32 hash_value, hash_bit, hash_reg; + int i; + + mac->mta_reg_count = 128; + + /* clear mta_shadow */ + memset(&mac->mta_shadow, 0, sizeof(mac->mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = hash_mc_addr(netdev, mc_addr_list); + + hash_reg = (hash_value >> 5) & (mac->mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mac->mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + GRTNIC_WRITE_REG(hw, MAC_HASH_TABLE_START, 0, 0); + + /* replace the entire MTA table */ + for (i = 0; i< mac->mta_reg_count; i++) + GRTNIC_WRITE_REG(hw, MAC_HASH_TABLE_WR, mac->mta_shadow[i], 0); +} + + + +static int grtnic_write_mc_addr_list(struct net_device *netdev) +{ + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + update_mc_addr_list(netdev, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. 
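+ * Each entry is exactly ETH_ALEN bytes with no gaps, which is what lets
+ * update_mc_addr_list() walk the list in fixed ETH_ALEN strides.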
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); +#else + memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); +#endif + + update_mc_addr_list(netdev, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +static void grtnic_set_rx_mode(struct net_device *netdev) +{ + int count; + u32 rctl, multicast_mode, all_multicast_mode, promisc_mode; + + promisc_mode = 1; + all_multicast_mode = 2; + multicast_mode = 4; + + rctl = grtnic_GetAdrsFilter(netdev); + rctl &= 0x0000000f; + + /* clear the affected bits */ + rctl &= ~(multicast_mode | all_multicast_mode| promisc_mode); //muliticast & all multicast & promisc + + /* Check for Promiscuous and All Multicast modes */ + + if (netdev->flags & IFF_PROMISC) + { + rctl |= promisc_mode; //promisc + } + + else + { + if (netdev->flags & IFF_ALLMULTI) + { + rctl |= all_multicast_mode; + } + else if (!netdev_mc_empty(netdev)) + { + count = netdev_mc_count(netdev); + rctl |= multicast_mode; + count = grtnic_write_mc_addr_list(netdev); + if (count < 0) + rctl |= all_multicast_mode; + } + } + grtnic_SetAdrsFilter(netdev, rctl); +} + + + +/** + * grtnic_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ + +void grtnic_update_stats(struct grtnic_adapter *adapter) +{ +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct grtnic_hw_stats *hwstats = &adapter->stats; + u32 temp_val; + + u32 i; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 alloc_rx_page = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + + if (test_bit(__GRTNIC_DOWN, &adapter->state) || + test_bit(__GRTNIC_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page = alloc_rx_page; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + temp_val = GRTNIC_READ_REG(&adapter->hw, 
MAC_RX_OVERFLOW_FRAME, 0); + if(temp_val < hwstats->mpc) + hwstats->mpc = 0x100000000 + temp_val; + else + hwstats->mpc = temp_val; + + net_stats->rx_missed_errors = hwstats->mpc; + + hwstats->ruc = grtnic_get_statistics_cnt(adapter, 0x210, hwstats->ruc); + hwstats->roc = grtnic_get_statistics_cnt(adapter, 0x250, hwstats->roc); + hwstats->rfc = grtnic_get_statistics_cnt(adapter, 0x218, hwstats->rfc); //crc error(<64) + hwstats->crcerrs = grtnic_get_statistics_cnt(adapter, 0x298, hwstats->crcerrs); //crc error(>=64) + hwstats->rlec = grtnic_get_statistics_cnt(adapter, 0x2B8, hwstats->rlec); + hwstats->badopcode = grtnic_get_statistics_cnt(adapter, 0x2D0, hwstats->badopcode); + hwstats->algnerrc = grtnic_get_statistics_cnt(adapter, 0x340, hwstats->algnerrc); + + net_stats->rx_errors = hwstats->rfc + + hwstats->crcerrs + + hwstats->algnerrc + + hwstats->ruc + + hwstats->roc + + hwstats->rlec + + hwstats->badopcode; + + net_stats->rx_length_errors = hwstats->ruc + hwstats->roc + hwstats->rlec; + net_stats->rx_crc_errors = hwstats->rfc + hwstats->crcerrs; + net_stats->rx_frame_errors = hwstats->algnerrc; + + hwstats->ecol = grtnic_get_statistics_cnt(adapter, 0x330, hwstats->ecol); + hwstats->latecol = grtnic_get_statistics_cnt(adapter, 0x328, hwstats->latecol); + hwstats->tx_underrun = grtnic_get_statistics_cnt(adapter, 0x2F0, hwstats->tx_underrun); + + net_stats->tx_errors = hwstats->ecol + hwstats->latecol + hwstats->tx_underrun; + net_stats->tx_aborted_errors = hwstats->ecol; + net_stats->tx_window_errors = hwstats->latecol; + net_stats->tx_carrier_errors = hwstats->tx_underrun; +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + hwstats->gprc = grtnic_get_statistics_cnt(adapter, 0x290, hwstats->gprc); + hwstats->gorc = grtnic_get_statistics_cnt(adapter, 0x200, hwstats->gorc); + hwstats->bprc = grtnic_get_statistics_cnt(adapter, 0x2A0, hwstats->bprc); + hwstats->mprc = grtnic_get_statistics_cnt(adapter, 0x2A8, hwstats->mprc); + + hwstats->prc64 = grtnic_get_statistics_cnt(adapter, 0x220, hwstats->prc64); + hwstats->prc127 = grtnic_get_statistics_cnt(adapter, 0x228, hwstats->prc127); + hwstats->prc255 = grtnic_get_statistics_cnt(adapter, 0x230, hwstats->prc255); + hwstats->prc511 = grtnic_get_statistics_cnt(adapter, 0x238, hwstats->prc511); + hwstats->prc1023 = grtnic_get_statistics_cnt(adapter, 0x240, hwstats->prc1023); + hwstats->prc1522 = grtnic_get_statistics_cnt(adapter, 0x248, hwstats->prc1522); + hwstats->prcoversize = grtnic_get_statistics_cnt(adapter, 0x250, hwstats->prcoversize); + + hwstats->scc = grtnic_get_statistics_cnt(adapter, 0x310, hwstats->scc); + hwstats->mcc = grtnic_get_statistics_cnt(adapter, 0x318, hwstats->mcc); + hwstats->dc = grtnic_get_statistics_cnt(adapter, 0x320, hwstats->dc); + hwstats->rxpause = grtnic_get_statistics_cnt(adapter, 0x2C8, hwstats->rxpause); + hwstats->txpause = grtnic_get_statistics_cnt(adapter, 0x308, hwstats->txpause); + + hwstats->gptc = grtnic_get_statistics_cnt(adapter, 0x2D8, hwstats->gptc); + hwstats->gotc = grtnic_get_statistics_cnt(adapter, 0x208, hwstats->gotc); + hwstats->bptc = grtnic_get_statistics_cnt(adapter, 0x2E0, hwstats->bptc); + hwstats->mptc = grtnic_get_statistics_cnt(adapter, 0x2E8, hwstats->mptc); + + hwstats->ptc64 = grtnic_get_statistics_cnt(adapter, 0x258, hwstats->ptc64); + hwstats->ptc127 = grtnic_get_statistics_cnt(adapter, 0x260, hwstats->ptc127); + hwstats->ptc255 = grtnic_get_statistics_cnt(adapter, 0x268, 
hwstats->ptc255); + hwstats->ptc511 = grtnic_get_statistics_cnt(adapter, 0x270, hwstats->ptc511); + hwstats->ptc1023 = grtnic_get_statistics_cnt(adapter, 0x278, hwstats->ptc1023); + hwstats->ptc1522 = grtnic_get_statistics_cnt(adapter, 0x280, hwstats->ptc1522); + hwstats->ptcoversize = grtnic_get_statistics_cnt(adapter, 0x288, hwstats->ptcoversize); +} + + +#ifdef HAVE_NDO_GET_STATS64 +static void grtnic_get_ring_stats64(struct rtnl_link_stats64 *stats, struct grtnic_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } +} + +/** + * grtnic_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ixgbe_get_stats for kernels which support it. + */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void grtnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 * +grtnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *ring = READ_ONCE(adapter->tx_ring[i]); + + grtnic_get_ring_stats64(stats, ring); + } + + rcu_read_unlock(); + + /* following stats updated by grtnic_watchdog_task() */ +// stats->multicast = netdev->stats.multicast; +// stats->rx_errors = netdev->stats.rx_errors; +// stats->rx_length_errors = netdev->stats.rx_length_errors; +// stats->rx_crc_errors = netdev->stats.rx_crc_errors; +// stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + + return stats; +#endif +} + +#else + +/** + * grtnic_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. 
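+ * This legacy path is only compiled on kernels without ndo_get_stats64.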
+ **/ +static struct net_device_stats *grtnic_get_stats(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + grtnic_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif //HAVE_NDO_GET_STATS64 + +///////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused struct net_device *sb_dev, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + int card_type = adapter->ei->type; + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index = -1; + + if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) + { + if (skb_rx_queue_recorded(skb)) + { + new_index = skb_get_rx_queue(skb); + while (unlikely(new_index >= dev->real_num_tx_queues)) + new_index -= dev->real_num_tx_queues; + + if (queue_index != new_index && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, new_index); + + return new_index; + } + } + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + return netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + return fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + return fallback(dev, skb); +#else + return __netdev_pick_tx(dev, skb); +#endif +} +#endif /* CONFIG_USER_QUEUE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +///////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
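+ * With MSI-X enabled each queue vector's handler is invoked directly so that
+ * pending descriptors are still processed; otherwise the legacy ISR is used.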
+ */ +static void grtnic_netpoll(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + grtnic_msix_ring(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + grtnic_isr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops grtnic_netdev_ops = { + .ndo_open = grtnic_open, + .ndo_stop = grtnic_close, + .ndo_start_xmit = grtnic_xmit_frame, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = grtnic_get_stats64, +#else + .ndo_get_stats = grtnic_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = grtnic_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = grtnic_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ + +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = grtnic_change_mtu, +#else + .ndo_change_mtu = grtnic_change_mtu, +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ + +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = grtnic_ioctl, +#else + .ndo_do_ioctl = grtnic_ioctl, +#endif /* HAVE_NDO_ETH_IOCTL */ + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + + .ndo_set_rx_mode = grtnic_set_rx_mode, + .ndo_set_mac_address= grtnic_set_mac, +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + .ndo_select_queue = grtnic_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif /* CONFIG_USER_QUEUE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +// .ndo_tx_timeout = xdmanet_tx_timeout, +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +///////////////////////////////////////////////////////////////////////////////////////////////// + +void grtnic_assign_netdev_ops(struct net_device *netdev) +{ +#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &grtnic_netdev_ops; +#else + netdev->open = &grtnic_open; + netdev->stop = &grtnic_close; + netdev->hard_start_xmit = &grtnic_xmit_frame; + netdev->get_stats = &grtnic_get_stats; +#ifdef HAVE_SET_RX_MODE + netdev->set_rx_mode = &grtnic_set_rx_mode; +#endif + netdev->set_multicast_list = &grtnic_set_rx_mode; + + netdev->set_mac_address = &grtnic_set_mac; + netdev->change_mtu = &grtnic_change_mtu; +// netdev->tx_timeout = &xdmanet_tx_timeout; + netdev->do_ioctl = &grtnic_ioctl; + +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = &grtnic_netpoll; +#endif + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + netdev->select_queue = &grtnic_select_queue; +#else + netdev->select_queue = &__netdev_pick_tx; +#endif /*CONFIG_USER_QUEUE*/ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /*HAVE_NET_DEVICE_OPS*/ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(netdev)->ndo_busy_poll = grtnic_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + grtnic_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; +} \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_nvm.c b/drivers/net/ethernet/guangruntong/grtnic_nvm.c new file mode 100755 index 00000000000000..035d1071b7a9f9 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_nvm.c @@ -0,0 +1,125 @@ +#include "grtnic.h" +#include "grtnic_nvm.h" + +///////////////////////////////////////////////////////////////////////////////////////////// +int erase_sector_flash(struct grtnic_adapter *adapter, u32 offset) //erase 0x10000(64k) every time +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + + writel( (SPI_CMD_ADDR(offset) | SPI_CMD_CMD(SECTOR_ERASE_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, + SPI_ERASE_TIMEOUT, 0); + + if (status) { + printk("FLASH erase timed out\n"); + } + + return status; +} + + +int erase_subsector_flash(struct grtnic_adapter *adapter, u32 offset) //erase 0x1000(4k) every time +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + + writel( (SPI_CMD_ADDR(offset) | SPI_CMD_CMD(SUBSECTOR_ERASE_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, SPI_ERASE_TIMEOUT, 0); + if (status) { + printk("FLASH erase timed out\n"); + } + + return status; +} + +/** + * ngbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +int write_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data) +{ + struct grtnic_hw *hw = 
&adapter->hw; + int status = 0; + u32 i; + + for (i = 0; i < dwords; i++) { + writel(be32(data[i]), hw->user_bar + SPI_DATA); + writel( (SPI_CMD_ADDR(offset + (i << 2)) | SPI_CMD_CMD(PAGE_PROG_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, + SPI_TIMEOUT, 0); + if (status) { + printk("FLASH write timed out\n"); + break; + } + } + + return status; +} + +/** + * ngbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +int read_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data) +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + u32 i; + + for (i = 0; i < dwords; i++) { + writel( (SPI_CMD_ADDR(offset + (i << 2)) | SPI_CMD_CMD(PAGE_READ_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_DATA_OP_DONE, SPI_DATA_OP_DONE, + SPI_TIMEOUT, 0); + if (status != 0) { + printk("FLASH read timed out\n"); + break; + } + + data[i] = readl(hw->user_bar + SPI_DATA); + } + + return status; +} + +void write_flash_macaddr(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + u32 *temp; + + int firmware_offset = adapter->speed; + int port = adapter->func; + u32 offset = VPD_OFFSET - (firmware_offset * 0x100000); + + temp = vmalloc(FLASH_SUBSECTOR_SIZE); + memset(temp, 0x00, FLASH_SUBSECTOR_SIZE); + + read_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, temp); //subsector is 4K + erase_subsector_flash(adapter, offset); + + temp[(MAC_ADDR_OFFSET>>2) + port*2] = (netdev->dev_addr[2] << 24 | netdev->dev_addr[3] << 16 | netdev->dev_addr[4] << 8 | netdev->dev_addr[5]); + temp[(MAC_ADDR_OFFSET>>2) + port*2+1] = (netdev->dev_addr[0] << 8 | netdev->dev_addr[1]); + + write_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, temp); + vfree(temp); +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_nvm.h b/drivers/net/ethernet/guangruntong/grtnic_nvm.h new file mode 100755 index 00000000000000..5fb68361aecb6c --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_nvm.h @@ -0,0 +1,72 @@ +#ifndef GRTNIC_NVM_H +#define GRTNIC_NVM_H + +#define be32(x) ((x<<24 & 0xff000000) | (x<<8 & 0x00ff0000) | (x>>8 & 0x0000ff00) | (x>>24 & 0x000000ff)) +#define be16(x) ((x<<8 & 0xff00) | (x>>8 & 0x00ff)) + +////////////////////////////////////////////////// +#define PAGE_READ_CMD 0x00 +#define SECTOR_ERASE_CMD 0x01 +#define SUBSECTOR_ERASE_CMD 0x02 +#define PAGE_PROG_CMD 0x03 + + +#define MAX_FLASH_LOAD_POLL_TIME 10 + +#define SPI_CMD 0x0400 +#define SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) + +#define SPI_DATA 0x0404 + +#define SPI_STATUS 0x0408 +#define SPI_STATUS_OPDONE ((0x1)) +#define SPI_DATA_OP_DONE ((0x2)) + +#define SPI_ERASE_TIMEOUT 2000000 +#define SPI_TIMEOUT 20000 + +//////////////////////////////////////////////////////////////// +#define VPD_OFFSET 0xEFF000 +#define MAC_ADDR_OFFSET 0x100 +#define VERSION_OFFSET 0x200 + +#define PXE_OFFSET 0xE00000 + +#define FLASH_SECTOR_SIZE 0x10000 //64k +#define FLASH_SUBSECTOR_SIZE 0x1000 //4k +//////////////////////////////////////////////////////////////// + +static inline int po32m(uint8_t* hw, u32 reg, u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? 
(usecs + loop - 1) / loop : 0); + + count = loop; +// printf("loop = %d, usecs = %d\n", loop, usecs); + do { + u32 value = readl(hw + reg); + + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + +// udelay(20); + usleep_range(20,20); + } while (1); + + return (count - loop <= count ? 0 : 1); +} + +int erase_sector_flash(struct grtnic_adapter *adapter, u32 offset); +int erase_subsector_flash(struct grtnic_adapter *adapter, u32 offset); +int write_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data); +int read_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data); +void write_flash_macaddr(struct net_device *netdev); + +#endif /* GRTNIC_NVM_H */ diff --git a/drivers/net/ethernet/guangruntong/grtnic_param.c b/drivers/net/ethernet/guangruntong/grtnic_param.c new file mode 100755 index 00000000000000..7a16cf48137cd7 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_param.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2021 Intel Corporation. */ + +#include +#include + +#include "grtnic.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define GRTNIC_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define GRTNIC_PARAM_INIT { [0 ... GRTNIC_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ixgbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
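+ * For example, GRTNIC_PARAM_INIT above expands to
+ * { [0 ... GRTNIC_MAX_NIC] = OPTION_UNSET }, i.e. all GRTNIC_MAX_NIC + 1
+ * entries start out as -1.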
+ */ + +#define GRTNIC_PARAM(X, desc) \ + static const int __devinitdata X[GRTNIC_MAX_NIC+1] = GRTNIC_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(GRTNIC_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define GRTNIC_PARAM(X, desc) \ + static int X[GRTNIC_MAX_NIC+1] = GRTNIC_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif //module_param_array + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +GRTNIC_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define GRTNIC_INT_LEGACY 0 +#define GRTNIC_INT_MSI 1 +#define GRTNIC_INT_MSIX 2 + +GRTNIC_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); + + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 956-488281 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +GRTNIC_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,956-488281), default 1"); +#define MAX_ITR 488281 +#define MIN_ITR 956 + + +GRTNIC_PARAM(csum_tx_mode, "Disable or enable tx hecksum offload, default 1"); +GRTNIC_PARAM(csum_rx_mode, "Disable or enable rx hecksum offload, default 1"); + + + +struct grtnic_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct grtnic_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int grtnic_validate_option(struct net_device *netdev, + unsigned int *value, + struct grtnic_option *opt) +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct grtnic_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * grtnic_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
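+ * Out-of-range values are reported and replaced with the option's default by
+ * grtnic_validate_option().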
+ **/ +void grtnic_check_options(struct grtnic_adapter *adapter) +{ + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + + if (bd >= GRTNIC_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); +#ifndef module_param_array + bd = GRTNIC_MAX_NIC; +#endif + } + + + { /* Interrupt Mode */ + unsigned int int_mode; + static struct grtnic_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of " __MODULE_STRING(GRTNIC_INT_MSIX), + .def = GRTNIC_INT_MSIX, + .arg = { .r = { .min = GRTNIC_INT_LEGACY, + .max = GRTNIC_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + grtnic_validate_option(adapter->netdev, + &int_mode, &opt); + switch (int_mode) { + case GRTNIC_INT_MSIX: + if (!(*aflags & GRTNIC_FLAG_MSIX_CAPABLE)) + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case GRTNIC_INT_MSI: + if (!(*aflags & GRTNIC_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + } + break; + case GRTNIC_INT_LEGACY: + default: + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + *aflags &= ~GRTNIC_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (*aflags & GRTNIC_FLAG_MSIX_CAPABLE) { + *aflags |= GRTNIC_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + *aflags &= ~GRTNIC_FLAG_MSI_CAPABLE; + } + } +#endif + } + + { /* Interrupt Throttling Rate */ + static struct grtnic_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + grtnic_validate_option(adapter->netdev, + &itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (1000000/itr) << 2; + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } + + { /* Tx Checksum Support */ + static struct grtnic_option opt = { + .type = enable_option, + .name = "Tx checksum Enable", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_csum_tx_mode > bd) { +#endif + unsigned int csum_tx = csum_tx_mode[bd]; + grtnic_validate_option(adapter->netdev, &csum_tx, &opt); + if (csum_tx) + *aflags |= GRTNIC_FLAG_TXCSUM_CAPABLE; + else + *aflags &= ~GRTNIC_FLAG_TXCSUM_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= GRTNIC_FLAG_TXCSUM_CAPABLE; + } +#endif + } + + { /* Rx Checksum Support */ + static struct grtnic_option opt = { + .type = enable_option, + .name = "Rx checksum Enable", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_csum_rx_mode > bd) { +#endif + 
unsigned int csum_rx = csum_rx_mode[bd]; + grtnic_validate_option(adapter->netdev, &csum_rx, &opt); + if (csum_rx) + *aflags |= GRTNIC_FLAG_RXCSUM_CAPABLE; + else + *aflags &= ~GRTNIC_FLAG_RXCSUM_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= GRTNIC_FLAG_RXCSUM_CAPABLE; + } +#endif + } + + +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_proc.c b/drivers/net/ethernet/guangruntong/grtnic_proc.c new file mode 100755 index 00000000000000..ae0e2f43fe986e --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_proc.c @@ -0,0 +1,233 @@ +#include +//#include +#include "grtnic.h" +#ifdef GRTNIC_PROCFS + +struct proc_dir_entry *grtnic_top_dir = NULL; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) +//ssize_t update_firmware(struct file *file, const char __user *buffer, size_t count, loff_t *pos); + +//static int grtnic_driver_generic_read(struct seq_file *m, void *v) +//{ +// return 0; +//} + +//static int grtnic_driver_generic_open(struct inode *inode, struct file *file) +//{ +// return single_open(file, NULL, PDE_DATA(inode)); +//} + +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_driver_pktcnt_read(struct seq_file *m, void *v) +{ + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + + seq_printf(m, "tx_count0 = %u, tx_count1 = %u, rx_count = %u\n", adapter->tx_count0, adapter->tx_count1, adapter->rx_count); + return 0; +} + +static int grtnic_driver_pktcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_driver_pktcnt_read, PDE_DATA(inode)); +} +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_hardware_pktcnt_read(struct seq_file *m, void *v) +{ + u32 h2c_cnt, c2h_cnt, rx_cnt; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + struct grtnic_hw *hw = &adapter->hw; + + h2c_cnt = readl(hw->user_bar + 0x604); + c2h_cnt = readl(hw->user_bar + 0x608); + rx_cnt = readl(hw->user_bar + 0x60C); + + seq_printf(m, "h2c_count = %u, c2h_count = %u, rx_count = %u\n", h2c_cnt, c2h_cnt, rx_cnt); + return 0; +} + +static int grtnic_hardware_pktcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_hardware_pktcnt_read, PDE_DATA(inode)); +} +//////////////////////////////////////////////////////////////////////////////////////////////// + +static int grtnic_hardware_error(struct seq_file *m, void *v) +{ + u32 var; + int read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + struct grtnic_hw *hw = &adapter->hw; + + var = readl(hw->user_bar + 0x600); + + read_count_error = (var >> 31) & 0x01; + sgtxfifo_error = (var >> 16) & 0xff; + sgrxfifo_error = (var >> 8) & 0xff; + mainfifo_error = (var >> 0) & 0xff; + seq_printf(m, "read_count_error = %d, sgtxfifo_error = %d, sgrxfifo_error = %d, mainfifo_error = %d\n", read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error); + return 0; +} + +static int grtnic_hardware_error_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_hardware_error, PDE_DATA(inode)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////// +struct grtnic_proc_type { + char *name; + int (*open)(struct inode *inode, struct file *file); + int (*read)(struct seq_file *m, void *v); + ssize_t (*write)(struct file *file, const char __user *buffer, size_t count, loff_t 
*pos); +}; + +struct grtnic_proc_type grtnic_proc_entries[] = { + {"pktcnt", &grtnic_driver_pktcnt_open, NULL, NULL}, + {"fpktcnt", &grtnic_hardware_pktcnt_open, NULL, NULL}, + {"fharderr", &grtnic_hardware_error_open, NULL, NULL}, +// {"update", &grtnic_driver_generic_open, NULL, &update_firmware}, + {NULL, NULL, NULL, NULL} +}; + +//////////////////////////////////////////////////////////////////////////////////////////////// + + +#else //LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) +//int update_firmware(struct file *file, const char *buffer, unsigned long count, void *data); + +static int grtnic_driver_pktcnt_read(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + + return snprintf(page, count, "tx_count0 = %u, tx_count1 = %u, rx_count = %d\n", adapter->tx_count0, adapter->tx_count1, adapter->rx_count); +} + +static int grtnic_hardware_pktcnt_read(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + u32 h2c_cnt, c2h_cnt, rx_cnt; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + struct grtnic_hw *hw = &adapter->hw; + + h2c_cnt = readl(hw->user_bar + 0x604); + c2h_cnt = readl(hw->user_bar + 0x608); + rx_cnt = readl(hw->user_bar + 0x60C); + + return snprintf(page, count, "h2c_count = %u, c2h_count = %u, rx_count = %u\n", h2c_cnt, c2h_cnt, rx_cnt); +} + +static int grtnic_hardware_error(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + u32 var; + int read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + struct grtnic_hw *hw = &adapter->hw; + + var = readl(hw->user_bar + 0x600); + + read_count_error = (var >> 31) & 0x01; + sgtxfifo_error = (var >> 16) & 0xff; + sgrxfifo_error = (var >> 8) & 0xff; + mainfifo_error = (var >> 0) & 0xff; + + return snprintf(page, count, "read_count_error = %d, sgtxfifo_error = %d, sgrxfifo_error = %d, mainfifo_error = %d\n", read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error); +} + +struct grtnic_proc_type { + char *name; + int (*read)(char *page, char **start, off_t off, int count, int *eof, void *data); + int (*write)(struct file *file, const char *buffer, unsigned long count, void *data); +}; + +struct grtnic_proc_type grtnic_proc_entries[] = { + {"pktcnt", &grtnic_driver_pktcnt_read, NULL}, + {"fpktcnt", &grtnic_hardware_pktcnt_read, NULL}, + {"fharderr", &grtnic_hardware_error, NULL}, +// {"update", NULL, &update_firmware}, + {NULL, NULL, NULL} +}; + +#endif + + +int grtnic_procfs_topdir_init() +{ + grtnic_top_dir = proc_mkdir("driver/grtnic", NULL); + if (grtnic_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void grtnic_procfs_topdir_exit() +{ + remove_proc_entry("driver/grtnic", NULL); +} + + +int grtnic_procfs_init(struct grtnic_adapter *adapter) +{ + int index; +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) + struct file_operations *fops; +#else + struct proc_dir_entry *p; + mode_t mode = 0; +#endif + + adapter->proc_dir = proc_mkdir(pci_name(adapter->pdev), grtnic_top_dir); + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) + for (index = 0; grtnic_proc_entries[index].name; index++) { + fops = kmalloc(sizeof(struct file_operations), GFP_KERNEL); + fops->open = grtnic_proc_entries[index].open; + fops->read = seq_read; + fops->write = grtnic_proc_entries[index].write; + fops->llseek = seq_lseek; + fops->release = single_release; + proc_create_data(grtnic_proc_entries[index].name, 0644, 
adapter->proc_dir, fops, adapter); + } + +#else + for (index = 0; grtnic_proc_entries[index].name; index++) { + if (grtnic_proc_entries[index].read) + mode = S_IFREG | S_IRUGO; + if (grtnic_proc_entries[index].write) + mode |= S_IFREG | S_IWUSR; + + p = create_proc_entry(grtnic_proc_entries[index].name, mode, adapter->proc_dir); + p->read_proc = grtnic_proc_entries[index].read; + p->write_proc = grtnic_proc_entries[index].write; + p->data = adapter; + } +#endif + + return 0; +} + + +void grtnic_del_proc_entries(struct grtnic_adapter *adapter) +{ + int index; + + if (grtnic_top_dir == NULL) + return; + + for (index = 0; ; index++) + { + if(grtnic_proc_entries[index].name == NULL) + break; + remove_proc_entry(grtnic_proc_entries[index].name, adapter->proc_dir); + } + + if (adapter->proc_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), grtnic_top_dir); +} + + +void grtnic_procfs_exit(struct grtnic_adapter *adapter) +{ + grtnic_del_proc_entries(adapter); +} + +#endif //GRTNIC_PROCFS diff --git a/drivers/net/ethernet/guangruntong/kcompat-generator.sh b/drivers/net/ethernet/guangruntong/kcompat-generator.sh new file mode 100755 index 00000000000000..3bd5ff8ae7c341 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat-generator.sh @@ -0,0 +1,438 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +set -Eeuo pipefail + +# This file generates HAVE_ and NEED_ defines for current kernel +# (or KSRC if provided). +# +# It does so by 'gen' function calls (see body of 'gen-devlink' for examples). +# 'gen' could look for various kinds of declarations in provided kernel headers, +# eg look for an enum in one of files specified and check if given enumeration +# (single value) is present. See 'Documentation' or comment above the 'gen' fun +# in the kcompat-lib.sh. + +# Why using bash/awk instead of an old/legacy approach? +# +# The aim is to replicate all the defines provided by human developers +# in the past. Additional bonus is the fact, that we no longer need to care +# about backports done by OS vendors (RHEL, SLES, ORACLE, UBUNTU, more to come). +# We will even work (compile) with only part of backports provided. +# +# To enable smooth transition, especially in time of late fixes, "old" method +# of providing flags should still work as usual. + +# End of intro. +# Find info about coding style/rules at the end of file. +# Most of the implementation is in kcompat-lib.sh, here are actual 'gen' calls. + +export LC_ALL=C +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +ORIG_CWD="$(pwd)" +trap 'rc=$?; echo >&2 "$(realpath "$ORIG_CWD/${BASH_SOURCE[0]}"):$LINENO: failed with rc: $rc"' ERR + +# shellcheck source=kcompat-lib.sh +source "$SCRIPT_DIR"/kcompat-lib.sh + +ARCH=$(uname -m) +IS_ARM= +if [ "$ARCH" == aarch64 ]; then + IS_ARM=1 +fi + +# DO NOT break gen calls below (via \), to make our compat code more grep-able, +# keep them also grouped, first by feature (like DEVLINK), then by .h filename +# finally, keep them sorted within a group (sort by flag name) + +# handy line of DOC copy-pasted form kcompat-lib.sh: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in + +function gen-aux() { + ah='include/linux/auxiliary_bus.h' + mh='include/linux/mod_devicetable.h' + if config_has CONFIG_AUXILIARY_BUS; then + gen HAVE_AUXILIARY_DRIVER_INT_REMOVE if method remove of auxiliary_driver matches 'int' in "$ah" + fi + + # generate HAVE_AUXILIARY_DEVICE_ID only for cases when it's disabled in .config + if ! 
config_has CONFIG_AUXILIARY_BUS; then + gen HAVE_AUXILIARY_DEVICE_ID if struct auxiliary_device_id in "$mh" + fi +} + +function gen-bitfield() { + bf='include/linux/bitfield.h' + gen HAVE_INCLUDE_BITFIELD if macro FIELD_PREP in "$bf" + gen NEED_BITFIELD_FIELD_FIT if macro FIELD_FIT absent in "$bf" + gen NEED_BITFIELD_FIELD_MASK if fun field_mask absent in "$bf" + gen NEED_BITFIELD_FIELD_MAX if macro FIELD_MAX absent in "$bf" +} + +function gen-device() { + dh='include/linux/device.h' + dph='include/linux/dev_printk.h' + gen NEED_BUS_FIND_DEVICE_CONST_DATA if fun bus_find_device lacks 'const void \\*data' in "$dh" + gen NEED_DEV_LEVEL_ONCE if macro dev_level_once absent in "$dh" "$dph" + gen NEED_DEVM_KASPRINTF if fun devm_kasprintf absent in "$dh" + gen NEED_DEVM_KFREE if fun devm_kfree absent in "$dh" + gen NEED_DEVM_KVASPRINTF if fun devm_kvasprintf absent in "$dh" + gen NEED_DEVM_KZALLOC if fun devm_kzalloc absent in "$dh" +} + +function gen-devlink() { + dh='include/net/devlink.h' + gen HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY if fun devlink_flash_update_begin_notify in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS if struct devlink_flash_update_params in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW if struct devlink_flash_update_params matches 'struct firmware \\*fw' in "$dh" + gen HAVE_DEVLINK_HEALTH if enum devlink_health_reporter_state in "$dh" + gen HAVE_DEVLINK_HEALTH_OPS_EXTACK if method dump of devlink_health_reporter_ops matches extack in "$dh" + gen HAVE_DEVLINK_INFO_DRIVER_NAME_PUT if fun devlink_info_driver_name_put in "$dh" + gen HAVE_DEVLINK_PARAMS if method validate of devlink_param matches extack in "$dh" + gen HAVE_DEVLINK_PARAMS_PUBLISH if fun devlink_params_publish in "$dh" + gen HAVE_DEVLINK_PORT_NEW if method port_new of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_OPS if struct devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_ops matches extack in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_port_ops matches extack in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_IN_OPS if method port_split of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_IN_PORT_OPS if method port_split of devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_ops matches devlink_port in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_port_ops matches devlink_port in "$dh" + gen HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV if fun devlink_port_type_eth_set matches 'struct net_device' in "$dh" + gen HAVE_DEVLINK_RATE_NODE_CREATE if fun devl_rate_node_create in "$dh" + # keep devlink_region_ops body in variable, to not look 4 times for + # exactly the same thing in big file + # please consider it as an example of "how to speed up if needed" + REGION_OPS="$(find-struct-decl devlink_region_ops "$dh")" + gen HAVE_DEVLINK_REGIONS if struct devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT if fun snapshot in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS if fun snapshot matches devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGISTER_SETS_DEV if fun devlink_register matches 'struct device' in "$dh" + gen HAVE_DEVLINK_RELOAD_ENABLE_DISABLE if fun devlink_reload_enable in "$dh" + gen HAVE_DEVLINK_SET_FEATURES if fun devlink_set_features in "$dh" + gen 
HAVE_DEVL_HEALTH_REPORTER_DESTROY if fun devl_health_reporter_destroy in "$dh" + gen HAVE_DEVL_PORT_REGISTER if fun devl_port_register in "$dh" + gen NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER if fun devlink_health_reporter_create matches auto_recover in "$dh" + gen NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE if fun devlink_resources_unregister matches 'struct devlink_resource \\*' in "$dh" + gen NEED_DEVLINK_TO_DEV if fun devlink_to_dev absent in "$dh" + gen NEED_DEVLINK_UNLOCKED_RESOURCE if fun devl_resource_size_get absent in "$dh" + + gen HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF if enum devlink_port_flavour matches DEVLINK_PORT_FLAVOUR_PCI_SF in include/uapi/linux/devlink.h + gen HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT if enum devlink_reload_action matches DEVLINK_RELOAD_ACTION_FW_ACTIVATE in include/uapi/linux/devlink.h +} + +function gen-dma() { + dma='include/linux/dma-mapping.h' + gen NEED_DMA_ATTRS_PTR if struct dma_attrs in include/linux/dma-attrs.h + gen NEED_DMA_ATTRS if fun dma_map_page_attrs absent in "$dma" +} + +function gen-dpll() { + dh='include/linux/dpll.h' + gen HAVE_DPLL_LOCK_STATUS_ERROR if method lock_status_get of dpll_device_ops matches status_error in "$dh" + gen HAVE_DPLL_PHASE_OFFSET if method phase_offset_get of dpll_pin_ops in "$dh" + gen NEED_DPLL_NETDEV_PIN_SET if fun dpll_netdev_pin_set absent in "$dh" +} + +function gen-ethtool() { + eth='include/linux/ethtool.h' + ueth='include/uapi/linux/ethtool.h' + gen HAVE_ETHTOOL_COALESCE_EXTACK if method get_coalesce of ethtool_ops matches 'struct kernel_ethtool_coalesce \\*' in "$eth" + gen HAVE_ETHTOOL_EXTENDED_RINGPARAMS if method get_ringparam of ethtool_ops matches 'struct kernel_ethtool_ringparam \\*' in "$eth" + gen HAVE_ETHTOOL_KEEE if struct ethtool_keee in "$eth" + gen HAVE_ETHTOOL_RXFH_PARAM if struct ethtool_rxfh_param in "$eth" + gen NEED_ETHTOOL_SPRINTF if fun ethtool_sprintf absent in "$eth" + gen HAVE_ETHTOOL_FLOW_RSS if macro FLOW_RSS in "$ueth" +} + +function gen-filter() { + fh='include/linux/filter.h' + gen NEED_NO_NETDEV_PROG_XDP_WARN_ACTION if fun bpf_warn_invalid_xdp_action lacks 'struct net_device \\*' in "$fh" + gen NEED_XDP_DO_FLUSH if fun xdp_do_flush absent in "$fh" +} + +function gen-flow-dissector() { + gen HAVE_FLOW_DISSECTOR_KEY_PPPOE if enum flow_dissector_key_id matches FLOW_DISSECTOR_KEY_PPPOE in include/net/flow_dissector.h include/net/flow_keys.h + # following HAVE ... CVLAN flag is mistakenly named after an enum key, + # but guards code around function call that was introduced later + gen HAVE_FLOW_DISSECTOR_KEY_CVLAN if fun flow_rule_match_cvlan in include/net/flow_offload.h +} + +function gen-gnss() { + cdh='include/linux/cdev.h' + clh='include/linux/device/class.h' + dh='include/linux/device.h' + gh='include/linux/gnss.h' + th='include/uapi/linux/types.h' + fh='include/linux/fs.h' + + gen HAVE_CDEV_DEVICE if fun cdev_device_add in "$cdh" + gen HAVE_DEV_UEVENT_CONST if method dev_uevent of class matches '(const|RH_KABI_CONST) struct device' in "$clh" "$dh" + gen HAVE_STREAM_OPEN if fun stream_open in "$fh" + # There can be either macro class_create or a function + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if fun class_create matches 'owner' in "$clh" "$dh" + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if macro class_create in "$clh" "$dh" + + if ! 
config_has CONFIG_SUSE_KERNEL; then + gen HAVE_GNSS_MODULE if struct gnss_device in "$gh" + fi + + gen HAVE_POLL_T if typedef __poll_t in "$th" +} + +function gen-mdev() { + mdevh='include/linux/mdev.h' + + gen HAVE_DEV_IN_MDEV_API if method probe of mdev_driver matches 'struct device \\*' in "$mdevh" + gen HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE if method create of mdev_parent_ops matches 'struct kobject \\*' in "$mdevh" +} + +function gen-netdevice() { + ndh='include/linux/netdevice.h' + gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in "$ndh" + gen HAVE_NDO_EXTENDED_SET_TX_MAXRATE if method ndo_set_tx_maxrate of net_device_ops_extended in "$ndh" + gen HAVE_NDO_FDB_ADD_VID if method ndo_fdb_del of net_device_ops matches 'u16 vid' in "$ndh" + gen HAVE_NDO_FDB_DEL_EXTACK if method ndo_fdb_del of net_device_ops matches extack in "$ndh" + gen HAVE_NDO_GET_DEVLINK_PORT if method ndo_get_devlink_port of net_device_ops in "$ndh" + gen HAVE_NDO_UDP_TUNNEL_CALLBACK if method ndo_udp_tunnel_add of net_device_ops in "$ndh" + gen HAVE_NETDEV_EXTENDED_MIN_MAX_MTU if struct net_device_extended matches min_mtu in "$ndh" + gen HAVE_NETDEV_MIN_MAX_MTU if struct net_device matches min_mtu in "$ndh" + gen HAVE_NETIF_SET_TSO_MAX if fun netif_set_tso_max_size in "$ndh" + gen HAVE_SET_NETDEV_DEVLINK_PORT if macro SET_NETDEV_DEVLINK_PORT in "$ndh" + gen NEED_NETIF_NAPI_ADD_NO_WEIGHT if fun netif_napi_add matches 'int weight' in "$ndh" + gen NEED_NET_PREFETCH if fun net_prefetch absent in "$ndh" + gen NEED_XDP_FEATURES if enum netdev_xdp_act absent in include/uapi/linux/netdev.h +} + +function gen-pci() { + pcih='include/linux/pci.h' + gen HAVE_PCI_MSIX_ALLOC_IRQ_AT if fun pci_msix_alloc_irq_at in "$pcih" + gen HAVE_PCI_MSIX_CAN_ALLOC_DYN if fun pci_msix_can_alloc_dyn in "$pcih" + gen HAVE_PCI_MSIX_FREE_IRQ if fun pci_msix_free_irq in "$pcih" + gen HAVE_PER_VF_MSIX_SYSFS if method sriov_set_msix_vec_count of pci_driver in "$pcih" + gen HAVE_STRUCT_PCI_DEV_PTM_ENABLED if struct pci_dev matches ptm_enabled in "$pcih" + gen NEED_PCIE_FLR if fun pcie_flr absent in "$pcih" + gen NEED_PCIE_FLR_RETVAL if fun pcie_flr lacks 'int pcie_flr' in "$pcih" + gen NEED_PCIE_PTM_ENABLED if fun pcie_ptm_enabled absent in "$pcih" + gen NEED_PCI_ENABLE_PTM if fun pci_enable_ptm absent in "$pcih" +} + +function gen-stddef() { + stddef='include/linux/stddef.h' + ustddef='include/uapi/linux/stddef.h' + gen HAVE_STDDEF_OFFSETTOEND if macro offsetofend in "$stddef" + gen NEED_DECLARE_FLEX_ARRAY if macro DECLARE_FLEX_ARRAY absent in "$stddef" + gen NEED_STRUCT_GROUP if macro struct_group absent in "$stddef" + gen NEED___STRUCT_GROUP if macro __struct_group absent in "$ustddef" +} + +function gen-vfio() { + # PASID_SUPPORT depends on multiple different functions existing + PASID_FUNC1="$(find-fun-decl mdev_set_iommu_device include/linux/mdev.h)" + PASID_FUNC2="$(find-fun-decl vfio_group_iommu_domain include/linux/vfio.h)" + + gen HAVE_PASID_SUPPORT if string "${PASID_FUNC1:+1}${PASID_FUNC2:+1}" equals 11 + gen HAVE_VFIO_FREE_DEV if fun vfio_free_device in include/linux/vfio.h + gen HAVE_LMV1_SUPPORT if macro VFIO_REGION_TYPE_MIGRATION in include/uapi/linux/vfio.h +} + +function gen-other() { + pciaerh='include/linux/aer.h' + ush='include/linux/u64_stats_sync.h' + gen HAVE_X86_STEPPING if struct cpuinfo_x86 matches x86_stepping in arch/x86/include/asm/processor.h + gen HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING if fun pci_enable_pcie_error_reporting in "$pciaerh" + gen NEED_PCI_AER_CLEAR_NONFATAL_STATUS if fun pci_aer_clear_nonfatal_status absent in 
"$pciaerh" + gen NEED_BITMAP_COPY_CLEAR_TAIL if fun bitmap_copy_clear_tail absent in include/linux/bitmap.h + gen NEED_BITMAP_FROM_ARR32 if fun bitmap_from_arr32 absent in include/linux/bitmap.h + gen NEED_BITMAP_TO_ARR32 if fun bitmap_to_arr32 absent in include/linux/bitmap.h + gen NEED_ASSIGN_BIT if fun assign_bit absent in include/linux/bitops.h + gen NEED_STATIC_ASSERT if macro static_assert absent in include/linux/build_bug.h + gen NEED_CLEANUP_API if macro __free absent in include/linux/cleanup.h + gen NEED___STRUCT_SIZE if macro __struct_size absent in include/linux/compiler_types.h include/linux/fortify-string.h + gen HAVE_COMPLETION_RAW_SPINLOCK if struct completion matches 'struct swait_queue_head' in include/linux/completion.h + gen NEED_IS_CONSTEXPR if macro __is_constexpr absent in include/linux/const.h include/linux/minmax.h include/linux/kernel.h + gen NEED_DEBUGFS_LOOKUP if fun debugfs_lookup absent in include/linux/debugfs.h + gen NEED_DEBUGFS_LOOKUP_AND_REMOVE if fun debugfs_lookup_and_remove absent in include/linux/debugfs.h + gen NEED_ETH_HW_ADDR_SET if fun eth_hw_addr_set absent in include/linux/etherdevice.h + gen NEED_FIND_NEXT_BIT_WRAP if fun find_next_bit_wrap absent in include/linux/find.h + gen HAVE_FILE_IN_SEQ_FILE if struct seq_file matches 'struct file' in include/linux/fs.h + gen NEED_FS_FILE_DENTRY if fun file_dentry absent in include/linux/fs.h + gen HAVE_HWMON_DEVICE_REGISTER_WITH_INFO if fun hwmon_device_register_with_info in include/linux/hwmon.h + gen NEED_HWMON_CHANNEL_INFO if macro HWMON_CHANNEL_INFO absent in include/linux/hwmon.h + gen NEED_ETH_TYPE_VLAN if fun eth_type_vlan absent in include/linux/if_vlan.h + gen HAVE_IOMMU_DEV_FEAT_AUX if enum iommu_dev_features matches IOMMU_DEV_FEAT_AUX in include/linux/iommu.h + gen NEED_READ_POLL_TIMEOUT if macro read_poll_timeout absent in include/linux/iopoll.h + gen NEED_DEFINE_STATIC_KEY_FALSE if macro DEFINE_STATIC_KEY_FALSE absent in include/linux/jump_label.h + gen NEED_STATIC_BRANCH_LIKELY if macro static_branch_likely absent in include/linux/jump_label.h + gen HAVE_STRUCT_STATIC_KEY_FALSE if struct static_key_false in include/linux/jump_label.h include/linux/jump_label_type.h + gen NEED_DECLARE_STATIC_KEY_FALSE if macro DECLARE_STATIC_KEY_FALSE absent in include/linux/jump_label.h include/linux/jump_label_type.h + gen NEED_LOWER_16_BITS if macro lower_16_bits absent in include/linux/kernel.h + gen NEED_UPPER_16_BITS if macro upper_16_bits absent in include/linux/kernel.h + gen NEED_LIST_COUNT_NODES if fun list_count_nodes absent in include/linux/list.h + + # On aarch64 RHEL systems, mul_u64_u64_div_u64 appears to be declared + # in math64 header, but is not provided by kernel + # so on these systems, set it to need anyway. 
+ if [ "$IS_ARM" ]; then + NEED_MUL_STR=1 + else + MUL_U64_U64_DIV_U64_FUNC="$(find-fun-decl mul_u64_u64_div_u64 include/linux/math64.h)" + NEED_MUL_STR="${MUL_U64_U64_DIV_U64_FUNC:-1}" + fi + gen NEED_MUL_U64_U64_DIV_U64 if string "${NEED_MUL_STR}" equals 1 + + gen HAVE_MDEV_GET_DRVDATA if fun mdev_get_drvdata in include/linux/mdev.h + gen HAVE_MDEV_REGISTER_PARENT if fun mdev_register_parent in include/linux/mdev.h + gen HAVE_VM_FLAGS_API if fun vm_flags_init in include/linux/mm.h + gen HAVE_NL_SET_ERR_MSG_FMT if macro NL_SET_ERR_MSG_FMT in include/linux/netlink.h + gen NEED_DEV_PM_DOMAIN_ATTACH if fun dev_pm_domain_attach absent in include/linux/pm_domain.h include/linux/pm.h + gen NEED_DEV_PM_DOMAIN_DETACH if fun dev_pm_domain_detach absent in include/linux/pm_domain.h include/linux/pm.h + gen NEED_PTP_CLASSIFY_RAW if fun ptp_classify_raw absent in include/linux/ptp_classify.h + gen NEED_PTP_PARSE_HEADER if fun ptp_parse_header absent in include/linux/ptp_classify.h + gen HAVE_PTP_CLOCK_INFO_ADJFINE if method adjfine of ptp_clock_info in include/linux/ptp_clock_kernel.h + gen NEED_DIFF_BY_SCALED_PPM if fun diff_by_scaled_ppm absent in include/linux/ptp_clock_kernel.h + gen NEED_PTP_SYSTEM_TIMESTAMP if fun ptp_read_system_prets absent in include/linux/ptp_clock_kernel.h + gen NEED_RADIX_TREE_EMPTY if fun radix_tree_empty absent in include/linux/radix-tree.h + gen NEED_SCHED_PARAM if struct sched_param absent in include/linux/sched.h + gen NEED_SET_SCHED_FIFO if fun sched_set_fifo absent in include/linux/sched.h + gen NEED_RT_H if macro MAX_RT_PRIO absent in include/linux/sched/prio.h + gen NEED_DEV_PAGE_IS_REUSABLE if fun dev_page_is_reusable absent in include/linux/skbuff.h + gen NEED_NAPI_BUILD_SKB if fun napi_build_skb absent in include/linux/skbuff.h + gen NEED_KREALLOC_ARRAY if fun krealloc_array absent in include/linux/slab.h + gen NEED_SYSFS_MATCH_STRING if macro sysfs_match_string absent in include/linux/string.h + gen NEED_SYSFS_EMIT if fun sysfs_emit absent in include/linux/sysfs.h + gen HAVE_TRACE_ENABLED_SUPPORT if implementation of macro __DECLARE_TRACE matches 'trace_##name##_enabled' in include/linux/tracepoint.h + gen HAVE_TTY_OP_WRITE_SIZE_T if method write of tty_operations matches size_t in include/linux/tty_driver.h + gen HAVE_U64_STATS_FETCH_BEGIN_IRQ if fun u64_stats_fetch_begin_irq in "$ush" + gen HAVE_U64_STATS_FETCH_RETRY_IRQ if fun u64_stats_fetch_retry_irq in "$ush" + gen NEED_U64_STATS_READ if fun u64_stats_read absent in "$ush" + gen NEED_U64_STATS_SET if fun u64_stats_set absent in "$ush" + gen HAVE_NET_RPS_H if macro RPS_NO_FILTER in include/net/rps.h +} + +# all the generations, extracted from main() to keep normal code and various +# prep separated +function gen-all() { + if config_has CONFIG_NET_DEVLINK; then + gen-devlink + fi + gen-netdevice + # code above is covered by unit_tests/test_gold.sh + if [ -n "${JUST_UNIT_TESTING-}" ]; then + return + fi + gen-aux + gen-bitfield + gen-device + gen-dma + gen-dpll + gen-ethtool + gen-filter + gen-flow-dissector + gen-gnss + gen-mdev + gen-pci + gen-stddef + gen-vfio + gen-other +} + +function main() { + if ! [ -d "${KSRC-}" ]; then + echo >&2 "env KSRC=${KSRC-} does not exist or is not a directory" + exit 11 + fi + + # we need some flags from .config or (autoconf.h), try to find it + if [ -z ${CONFIG_FILE-} ]; then + find_config_file + + if [ -z ${CONFIG_FILE-} ]; then + echo >&2 "unable to locate a config file at KSRC=${KSRC}. please set CONFIG_FILE to the kernel configuration file." 
+ exit 10 + fi + fi + + if [ ! -f "${CONFIG_FILE-}" ]; then + echo >&2 ".config passed in by env CONFIG_FILE=${CONFIG_FILE} does not exist or is not a file" + exit 9 + fi + CONFIG_FILE=$(realpath "${CONFIG_FILE-}") + + # check if caller (like our makefile) wants to redirect output to file + if [ -n "${OUT-}" ]; then + + # in case OUT exists, we don't want to overwrite it, instead + # write to a temporary copy. + if [ -s "${OUT}" ]; then + TMP_OUT="$(mktemp "${OUT}.XXX")" + trap "rm -f '${TMP_OUT}'" EXIT + + REAL_OUT="${OUT}" + OUT="${TMP_OUT}" + fi + + exec > "$OUT" + # all stdout goes to OUT since now + echo "/* Autogenerated for KSRC=${KSRC-} via $(basename "$0") */" + fi + + cd "${KSRC}" + + # check if KSRC was ok/if we are in proper place to look for headers + if [ -z "$(filter-out-bad-files include/linux/kernel.h)" ]; then + echo >&2 "seems that there are no kernel includes placed in KSRC=${KSRC} + pwd=$(pwd); ls -l:" + ls -l >&2 + exit 8 + fi + + if [ -z ${UNIFDEF_MODE-} ]; then + echo "#ifndef _KCOMPAT_GENERATED_DEFS_H_" + echo "#define _KCOMPAT_GENERATED_DEFS_H_" + fi + + gen-all + + if [ -z ${UNIFDEF_MODE-} ]; then + echo "#endif /* _KCOMPAT_GENERATED_DEFS_H_ */" + fi + + if [ -n "${OUT-}" ]; then + cd "$ORIG_CWD" + + # Compare and see if anything changed. This avoids updating + # mtime of the file. + if [ -n "${REAL_OUT-}" ]; then + if cmp --silent "${REAL_OUT}" "${TMP_OUT}"; then + # exit now, skipping print of the output since + # there were no changes. the trap should + # cleanup TMP_OUT + exit 0 + fi + + mv -f "${TMP_OUT}" "${REAL_OUT}" + OUT="${REAL_OUT}" + fi + + # dump output, will be visible in CI + if [ -n "${JUST_UNIT_TESTING-}${QUIET_COMPAT-}" ]; then + return + fi + cat -n "$OUT" >&2 + fi +} + +main + +# Coding style: +# - rely on `set -e` handling as much as possible, so: +# - do not use <(bash process substitution) - it breaks error handling; +# - do not put substantial logic in `if`-like statement - it disables error +# handling inside of the conditional (`if big-fun call; then` is substantial) +# - make shellcheck happy - https://www.shellcheck.net +# +# That enables us to move processing out of `if` or `... && ...` statements, +# what finally means that bash error handling (`set -e`) would break on errors. diff --git a/drivers/net/ethernet/guangruntong/kcompat-lib.sh b/drivers/net/ethernet/guangruntong/kcompat-lib.sh new file mode 100755 index 00000000000000..bcc3cc94f9915e --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat-lib.sh @@ -0,0 +1,403 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +# to be sourced + +# General shell helpers + +# exit with non-zero exit code; if there is only one param: +# exit with msg $1 and exit code from last command (or 99 if = 0) +# otherwise, exit with $1 and use remaining arguments as msg +function die() { + rc=$? + if [ $# -gt 1 ]; then + rc="$1" + shift + fi + [ "$rc" -ne 0 ] || rc=99 + echo >&2 "$@" + exit $rc +} + +# filter out paths that are not files +# input $@, output via echo; +# note: pass `-` for stdin +# note: outputs nothing if all input files are "bad" (eg. not existing), but it +# is left for caller to decide if this is an erorr condition; +# note: whitespaces are considered "bad" as part of filename, it's an error. 
+function filter-out-bad-files() { + if [[ $# = 1 && "$1" = '-' ]]; then + echo - + return 0 + fi + if [ $# = 0 ]; then + die 10 "no files passed, use '-' when reading from pipe (|)" + fi + local any=0 diagmsgs=/dev/stderr re=$'[\t \n]' + [ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null + for x in "$@"; do + if [ -e "$x" ]; then + if [[ "$x" =~ $re ]]; then + die 11 "err: filename contains whitespaces: $x." + fi + echo "$x" + any=1 + else + echo >&"$diagmsgs" filtering "$x" out + fi + done + if [ $any = 0 ]; then + echo >&"$diagmsgs" 'all files (for given query) filtered out' + fi +} + +# Basics of regexp explained, as a reference for mostly-C programmers: +# (bash) "regexp-$VAR-regexp" - bash' VARs are placed into "QUOTED" strings +# /\);?$/ - match end of function declaration, $ is end of string +# ^[ \t]* - (heuristic), anything but comment, eg to exclude function docs +# /STH/, /END/ - (awk), print all lines sice STH matched, up to END, inclusive + +# "Whitespace only" +WB='[ \t\n]' + +# Helpers below print the thing that is looked for, for further grep'ping/etc. +# That simplifies process of excluding comments or spares us state machine impl. +# +# We take advantage of current/common linux codebase formatting here. +# +# Functions in this section require input file/s passed as args +# (usually one, but more could be supplied in case of renames in kernel), +# '-' could be used as an (only) file argument to read from stdin/pipe. + +# wrapper over find-something-decl() functions below, to avoid repetition +# pass $what as $1, $end as $2, and $files to look in as rest of args +function find-decl() { + test $# -ge 3 # ensure that there are at least 3 params + local what end files + what="$1" + end="$2" + shift 2 + files="$(filter-out-bad-files "$@")" || die + if [ -z "$files" ]; then + return 0 + fi + # shellcheck disable=SC2086 + awk " + /^$WB*\*/ {next} + $what, $end + " $files +} + +# yield $1 function declaration (signature), don't pass return type in $1 +# looks only in files specified ($2, $3...) +function find-fun-decl() { + test $# -ge 2 + local what end + what="/$WB*([(]\*)?$1$WB*($|[()])/" + end='/\);?$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 enum declaration (type/body) +function find-enum-decl() { + test $# -ge 2 + local what end + what="/^$WB*enum$WB+$1"' \{$/' + end='/\};$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 struct declaration (type/body) +function find-struct-decl() { + test $# -ge 2 + local what end + what="/^$WB*struct$WB+$1"' \{$/' + end='/^\};$/' # that's (^) different from enum-decl + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 macro definition +function find-macro-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + end=1 # only first line; use find-macro-implementation-decl for full body + shift + find-decl "$what" "$end" "$@" +} + +# yield full macro implementation +function find-macro-implementation-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + # full implementation, until a line not ending in a backslash. + # Does not handle macros with comments embedded within the definition. 
+ end='/[^\\]$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 typedef definition (simple typedefs only) +# this probably won't handle typedef struct { \n int foo;\n}; +function find-typedef-decl() { + test $# -ge 2 + local what end + what="/^typedef .* $1"';$/' + end=1 + shift + find-decl "$what" "$end" "$@" +} + +# gen() - DSL-like function to wrap around all the other +# +# syntax: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in +# gen DEFINE if string "actual" equals "expected" + +# where: +# DEFINE is HAVE_ or NEED_ #define to print; +# `if` is there to just read it easier and made syntax easier to check; +# +# NAME is the name for what we are looking for; +# +# `if string` can be used to check if a provided string matches an expected +# value. The define will only be generated if the strings are exactly +# equal. Otherwise, the define will not be generated. When operating in +# UNIFDEF_MODE, -DDEFINE is output when the strings are equal, while +# -UDEFINE is output when the strings are not equal. This is intended +# for cases where a more complex conditional is required, such as +# generating a define when multiple different functions exist. +# +# Ex: +# +# FUNC1="$(find-fun-decl devlink_foo1 devlink.h)" +# FUNC2="$(find-fun-decl devlink_foo2 devlink.h)" +# gen HAVE_FOO_12 if string "${FUNC1:+1}${FUNC2:+1}" equals "11" +# +# KIND specifies what kind of declaration/definition we are looking for, +# could be: fun, enum, struct, method, macro, typedef, +# 'implementation of macro' +# for KIND=method, we are looking for function ptr named METHOD in struct +# named NAME (two optional args are then necessary (METHOD & of)); +# +# for KIND='implementation of macro' we are looking for the full +# implementation of the macro, not just its first line. This is usually +# combined with "matches" or "lacks". +# +# next [optional] args could be used: +# matches PATTERN - use to grep for the PATTERN within definition +# (eg, for ext_ack param) +# lacks - use to add #define only if there is no match of the PATTERN, +# *but* the NAME is *found* +# absent - the NAME that we grep for must be not found +# (ie: function not exisiting) +# +# without this optional params, behavior is the same as with +# `matches .` - use to grep just for existence of NAME; +# +# `in` is there to ease syntax, similar to `if` before. +# +# is just space-separate list of files to look in, +# single (-) for stdin. +# +# PATTERN is an awk pattern, will be wrapped by two slashes (/) +# +# The usual output is a list of "#define " lines for each flag that has +# a matched definition. When UNIFDEF_MODE is set to a non-zero string, the +# output is instead a sequence of "-D" for each matched definition, and +# "-U" for each definition which didn't match. 
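+# One more illustrative call (hypothetical flag name and header, not used by
+# this driver), tying the syntax above together:
+#
+#   gen HAVE_FOO_EXTACK if method set_foo of foo_ops matches extack in include/net/foo.h
+#
+# This emits "#define HAVE_FOO_EXTACK" only when struct foo_ops in
+# include/net/foo.h has a set_foo callback whose declaration mentions
+# "extack"; in UNIFDEF_MODE, -DHAVE_FOO_EXTACK or -UHAVE_FOO_EXTACK is
+# printed instead, depending on the same check.
+#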
+function gen() { + test $# -ge 4 || die 20 "too few arguments, $# given, at least 4 needed" + local define if_kw kind name in_kw # mandatory + local of_kw method_name operator pattern # optional + local src_line="${BASH_SOURCE[0]}:${BASH_LINENO[0]}" + define="$1" + if_kw="$2" + kind="$3" + local orig_args_cnt=$# + shift 3 + [ "$if_kw" != if ] && die 21 "$src_line: 'if' keyword expected, '$if_kw' given" + case "$kind" in + string) + local actual_str expect_str equals_kw missing_fmt found_fmt + + test $# -ge 3 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 6 needed" + + actual_str="$1" + equals_kw="$2" + expect_str="$3" + shift 3 + + if [ -z ${UNIFDEF_MODE:+1} ]; then + found_fmt="#define %s\n" + missing_fmt="" + else + found_fmt="-D%s\n" + missing_fmt="-U%s\n" + fi + + if [ "${actual_str}" = "${expect_str}" ]; then + printf -- "$found_fmt" "$define" + else + printf -- "$missing_fmt" "$define" + fi + + return + ;; + fun|enum|struct|macro|typedef) + test $# -ge 3 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 6 needed" + name="$1" + shift + ;; + method) + test $# -ge 5 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed" + method_name="$1" + of_kw="$2" + name="$3" + shift 3 + [ "$of_kw" != of ] && die 23 "$src_line: 'of' keyword expected, '$of_kw' given" + ;; + implementation) + test $# -ge 5 || die 28 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed" + of_kw="$1" + kind="$2" + name="$3" + shift 3 + [ "$of_kw" != of ] && die 29 "$src_line: 'of' keyword expected, '$of_kw' given" + [ "$kind" != macro ] && die 30 "$src_line: implementation only supports 'macro', '$kind' given" + kind=macro-implementation + ;; + *) die 24 "$src_line: unknown KIND ($kind) to look for" ;; + esac + operator="$1" + case "$operator" in + absent) + pattern='.' + in_kw="$2" + shift 2 + ;; + matches|lacks) + pattern="$2" + in_kw="$3" + shift 3 + ;; + in) + operator=matches + pattern='.' 
+ in_kw=in + shift + ;; + *) die 25 "$src_line: unknown OPERATOR ($operator) to look for" ;; + esac + [ "$in_kw" != in ] && die 26 "$src_line: 'in' keyword expected, '$in_kw' given" + test $# -ge 1 || die 27 "$src_line: too few arguments, at least one filename expected" + + local first_decl= + if [ "$kind" = method ]; then + first_decl="$(find-struct-decl "$name" "$@")" || exit 40 + # prepare params for next lookup phase + set -- - # overwrite $@ to be single dash (-) + name="$method_name" + kind=fun + elif [[ $# = 1 && "$1" = '-' ]]; then + # avoid losing stdin provided to gen() due to redirection (<<<) + first_decl="$(cat -)" + fi + + local unifdef + unifdef=${UNIFDEF_MODE:+1} + + # lookup the NAME + local body + body="$(find-$kind-decl "$name" "$@" <<< "$first_decl")" || exit 41 + awk -v define="$define" -v pattern="$pattern" -v "$operator"=1 -v unifdef="$unifdef" ' + BEGIN { + # prepend "identifier boundary" to pattern, also append + # it, but only for patterns not ending with such already + # + # eg: "foo" -> "\bfoo\b" + # "struct foo *" -> "\bstruct foo *" + + # Note that mawk does not support "\b", so we have our + # own approximation, NI + NI = "[^A-Za-z0-9_]" # "Not an Indentifier" + + if (!match(pattern, NI "$")) + pattern = pattern "(" NI "|$)" + pattern = "(^|" NI ")" pattern + } + /./ { not_empty = 1 } + $0 ~ pattern { found = 1 } + END { + if (unifdef) { + found_fmt="-D%s\n" + missing_fmt="-U%s\n" + } else { + found_fmt="#define %s\n" + missing_fmt="" + } + + if (lacks && !found && not_empty || matches && found || absent && !found) + printf(found_fmt, define) + else if (missing_fmt) + printf(missing_fmt, define) + } + ' <<< "$body" +} + +# tell if given flag is enabled in .config +# return 0 if given flag is enabled, 1 otherwise +# inputs: +# $1 - flag to check (whole word, without _MODULE suffix) +# env flag $CONFIG_FILE +# +# there are two "config" formats supported, to ease up integrators lifes +# .config (without leading #~ prefix): +#~ # CONFIG_ACPI_EC_DEBUGFS is not set +#~ CONFIG_ACPI_AC=y +#~ CONFIG_ACPI_VIDEO=m +# and autoconf.h, which would be: +#~ #define CONFIG_ACPI_AC 1 +#~ #define CONFIG_ACPI_VIDEO_MODULE 1 +function config_has() { + grep -qE "^(#define )?$1((_MODULE)? 1|=m|=y)$" "$CONFIG_FILE" +} + +# try to locate a suitable config file from KSRC +# +# On success, the CONFIG_FILE variable will be updated to reflect the full +# path to a configuration file. +# +# Depends on KSRC being set +function find_config_file() { + local -a CSP + local file + local diagmsgs=/dev/stderr + + [ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null + + if ! 
[ -d "${KSRC-}" ]; then + return + fi + + CSP=( + "$KSRC/include/generated/autoconf.h" + "$KSRC/include/linux/autoconf.h" + "$KSRC/.config" + ) + + for file in "${CSP[@]}"; do + if [ -f $file ]; then + echo >&"$diagmsgs" "using CONFIG_FILE=$file" + CONFIG_FILE=$file + return + fi + done +} diff --git a/drivers/net/ethernet/guangruntong/kcompat.c b/drivers/net/ethernet/guangruntong/kcompat.c new file mode 100755 index 00000000000000..301ea08fe25f75 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat.c @@ -0,0 +1,3075 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#include "grtnic.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + 
return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + 
size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +int ixgbe_dcb_netlink_register(void) +{ + return 0; +} + +int ixgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, 
work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_main(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus DMA control\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_main(struct pci_dev *dev) +{ + __kc_pci_set_main(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; 
+ + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + int ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ + u32 
v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int 
(*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += 
sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a allowlist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current allowlist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +int _kc_kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + break; + default: + break; + } + + return -EINVAL; +} +#endif /* < 4.6.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif +#ifdef SPEED_200000 + case SPEED_200000: + return "200Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. 
*/ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. 
+ */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +#ifdef NEED_DEVM_KASPRINTF +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = devm_kmalloc(dev, len + 1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len + 1, fmt, ap); + + return p; +} + +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = devm_kvasprintf(dev, gfp, fmt, ap); + va_end(ap); + + return p; +} +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_PCI_IOV_VF_ID +#ifdef CONFIG_PCI_IOV +/* + * Below function needs to access pci_sriov offset and stride. Since + * pci_sriov structure is defined in drivers/pci/pci.h which can not + * be included as linux kernel header file, the structure definition + * is not globally visible. + * As a result, one copy of structure definition is added. Since the + * definition is a copy, you need to make sure the kernel you want + * to backport must have exactly the same pci_sriov definition as the + * copy, otherwise you'll access wrong field offset and value. 
+ */ + +/* Single Root I/O Virtualization */ +struct pci_sriov { + int pos; /* Capability position */ + int nres; /* Number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_VFs; /* Total VFs associated with the PF */ + u16 initial_VFs; /* Initial VFs associated with the PF */ + u16 num_VFs; /* Number of VFs available */ + u16 offset; /* First VF Routing ID offset */ + u16 stride; /* Following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* Page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + u8 max_VF_buses; /* Max buses consumed by VFs */ + u16 driver_max_VFs; /* Max num VFs driver supports */ + struct pci_dev *dev; /* Lowest numbered PF */ + struct pci_dev *self; /* This PF */ + u32 cfg_size; /* VF config space size */ + u32 class; /* VF device */ + u8 hdr_type; /* VF header type */ + u16 subsystem_vendor; /* VF subsystem vendor */ + u16 subsystem_device; /* VF subsystem device */ + resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + bool drivers_autoprobe; /* Auto probing of VFs by driver */ +}; + +int _kc_pci_iov_vf_id(struct pci_dev *dev) +{ + struct pci_dev *pf; + + if (!dev->is_virtfn) + return -EINVAL; + + pf = pci_physfn(dev); + return (((dev->bus->number << 8) + dev->devfn) - + ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) / + pf->sriov->stride; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* NEED_PCI_IOV_VF_ID */ + +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. 
+ * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif /* NEED_MUL_U64_U64_DIV_U64 */ diff --git a/drivers/net/ethernet/guangruntong/kcompat.h b/drivers/net/ethernet/guangruntong/kcompat.h new file mode 100755 index 00000000000000..ae377eccf08c79 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat.h @@ -0,0 +1,7193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include "kcompat_gcc.h" +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ixgbe_adapter +#define adapter_q_vector ixgbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif 
+ +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) ({0;}) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#ifdef __CHECKER__ +/* cppcheck-suppress preprocessorErrorDirective */ +#endif /* __CHECKER__ */ +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
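+ * If UTS_UBUNTU_RELEASE_ABI is not provided by the headers at all, the
+ * fallback below simply defines both it and UBUNTU_VERSION_CODE as 0,
+ * i.e. the kernel is treated as a non-Ubuntu kernel.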
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + * + * This also lets us store an ABI value up to 65535, since it can take the + * space that would use the lower byte of the Linux version code. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 65535 +#error UTS_UBUNTU_RELEASE_ABI is larger than 65535... +#endif /* UTS_UBUNTU_RELEASE_ABI > 65535 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif /* <= 3.0.0 */ +#endif /* !UTS_UBUNTU_RELEASE_ABI */ + +/* We ignore the 3rd digit since we want to give precedence to the additional + * ABI value provided by Ubuntu. + */ +#define UBUNTU_VERSION(a,b,c,d) (((a) << 24) + ((b) << 16) + (d)) + +/* SLE_VERSION is used to generate a 3-digit encoding that can order SLE + * kernels based on their major release, service pack, and a possible + * maintenance release. + */ +#define SLE_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +/* The SLE_LOCALVERSION_CODE comes from a 3-digit code added as part of the + * Linux kernel version. It is extracted by the driver Makefile. This macro is + * used to generate codes for making comparisons below. + */ +#define SLE_LOCALVERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +#ifdef CONFIG_SUSE_KERNEL +/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have + * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and + * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available. + * Only fall back to the manual table otherwise. We expect all future versions + * of SLE kernels to include these values, so the table will remain only for + * the older releases. + */ +#ifdef CONFIG_SUSE_VERSION +#ifndef CONFIG_SUSE_PATCHLEVEL +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing" +#endif +#ifndef CONFIG_SUSE_AUXRELEASE +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing" +#endif +#define SLE_VERSION_CODE SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, CONFIG_SUSE_AUXRELEASE) +#else +/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back + * to the following table for older releases. 
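+ * Each entry in the table encodes the release with SLE_VERSION(); for
+ * example, SLE_VERSION(12,3,0) evaluates to (12 << 16) + (3 << 8) + 0
+ * == 0x0c0300, so a check such as
+ * '#if (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))' matches SLES 12 SP3
+ * and newer but not SLES 12 SP1 (SLE_VERSION(12,1,0) == 0x0c0100).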
+ */ +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#else +#error "This looks like a SUSE kernel, but it has an unrecognized local version code." +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* !CONFIG_SUSE_VERSION */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* Include definitions from the new kcompat layout */ +#include "kcompat_defs.h" + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. 
Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. + */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + + +#ifdef __KLOCWORK__ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ + +#ifdef wait_event_interruptible_timeout +#undef wait_event_interruptible_timeout +#define wait_event_interruptible_timeout(wq_head, condition, timeout) ({ \ + long ret; \ + if ((condition)) \ + ret = timeout; \ + else \ + ret = 0; \ + ret; \ +}) +#endif /* wait_event_interruptible_timeout */ + +#ifdef max_t +#undef max_t +#define max_t(type, x, y) ({ \ +type __x = (x); \ +type __y = (y); \ +__x > __y ? __x : __y; \ +}) +#endif /* max_t */ + +#ifdef min_t +#undef min_t +#define min_t(type, x, y) ({ \ +type __x = (x); \ +type __y = (y); \ +__x < __y ? __x : __y; \ +}) +#endif /* min_t */ +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. 
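+ * (GCC_VERSION is assumed here to use the kernel's usual encoding of
+ * major * 10000 + minor * 100 + patchlevel, so the 'GCC_VERSION < 50100'
+ * guard below is the GCC 5.1.0 cut-off.)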
Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) ({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ +}) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \ + (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. 
*/ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define 
netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while(0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef 
CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + 
INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + 
memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + 
int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. + */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) 
\ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#ifndef IXGBE_PROCFS +#define IXGBE_PROCFS +#endif /* IXGBE_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#if IS_ENABLED(CONFIG_SYSFS) +#ifndef IXGBE_SYSFS +#define IXGBE_SYSFS +#endif /* IXGBE_SYSFS */ +#endif /* CONFIG_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef IXGBE_HWMON +#define IXGBE_HWMON +#endif /* IXGBE_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) 
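+/* Single-queue fallback: when the kernel has no multiqueue Tx support
+ * (no HAVE_TX_MQ), the *_all_queues and *_subqueue wrappers above and
+ * below collapse onto the one device-wide queue, so a driver path such
+ * as calling netif_tx_stop_all_queues(netdev) on link down can be
+ * written once and behave the same across kernel generations.
+ */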
+#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_main(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_main(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef 
HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef 
netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state 
!= NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define 
device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define 
IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) 
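+/* These SUPPORTED_/ADVERTISED_ 20G link-mode bits only appeared upstream in
+ * 3.0, so they are backfilled here for the RHEL kernels that predate them;
+ * the bit positions (21 and 22) are meant to mirror the mainline ethtool
+ * definitions, so code reporting 20GbE link modes can use the names
+ * unconditionally.
+ */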
+#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE 
&& SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#ifndef NO_PTP_SUPPORT +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* CONFIG_PTP_1588_CLOCK */ +#endif /* !NO_PTP_SUPPORT */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 0 
+#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. + */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. 
+ */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+	struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key) \
+	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+	return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		if (!hlist_empty(&ht[i]))
+			return false;
+
+	return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+	hlist_del_init(node);
+}
+#endif /* RHEL >= 6.6 */
+
+/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags
+ * parameter on these older kernels.
+ */
+#define __setup_timer(_timer, _fn, _data, _flags) \
+	setup_timer((_timer), (_fn), (_data)) \
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \
+	( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ *         %false if it wasn't pending
+ *
+ * Note: the dwork parameter was declared as a void*
+ *       to avoid compatibility problems with early 2.6 kernels
+ *       where struct delayed_work is not declared. Unlike the original
+ *       implementation flags are not preserved and it shouldn't be
+ *       used in the interrupt context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
+				      void *dwork,
+				      unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+	!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT 0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef HAVE_GRE_ENCAP_OFFLOAD
+#define HAVE_GRE_ENCAP_OFFLOAD
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
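/*
 * Illustrative sketch, not part of the patch above: the backported
 * __kc_is_link_local_ether_addr() matches the IEEE 802.1Q reserved range
 * 01:80:C2:00:00:00 - 01:80:C2:00:00:0F by comparing the address as three
 * big-endian 16-bit words and masking the low nibble of the last word with
 * cpu_to_be16(0xfff0). The standalone user-space program below mirrors that
 * arithmetic with plain byte compares so the mask logic can be checked
 * outside the kernel; the helper name is_link_local() and the sample
 * addresses are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_link_local(const uint8_t addr[6])
{
	/* 01:80:C2:00:00:0X -- the low nibble of the last byte is ignored */
	static const uint8_t base[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

	return addr[0] == base[0] && addr[1] == base[1] &&
	       addr[2] == base[2] && addr[3] == base[3] &&
	       addr[4] == base[4] && (addr[5] & 0xf0) == base[5];
}

int main(void)
{
	const uint8_t stp[6]   = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; /* in range */
	const uint8_t lldp[6]  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }; /* in range */
	const uint8_t ucast[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 }; /* not reserved */

	/* prints "1 1 0" */
	printf("%d %d %d\n", is_link_local(stp), is_link_local(lldp),
	       is_link_local(ucast));
	return 0;
}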
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define 
dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, (cnt) * (size), flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. 
+ */ +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else /* >= 3.15.0 */ +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. 
+ * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < RHEL7.5 */ + +#if RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char 
*data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + 
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. 
+ */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, (u8 *)addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if(!defined(KYLIN_KERNEL44)) +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= 
UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif /*#if(!defined(KYLIN_KERNEL44))*/ + +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#ifndef kstrtobool +#define kstrtobool _kc_kstrtobool +int _kc_kstrtobool(const char *s, bool *res); +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#ifndef KYLIN_KERNEL +#define HAVE_TC_SETUP_CLSFLOWER +#endif /* KYLIN_KERNEL */ +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) &&\ + !defined(KYLIN_KERNEL44) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + 
return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16)) && !defined(KYLIN_KERNEL44)) +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == 
SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ + +/* If kernel is older than 4.10 but distro is RHEL >= 7.5 || SLES > 12SP4, + * it does have support for NAPI_STATE + */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) ||\ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))) +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#endif /* RHEL >= 7.5 || SLES >=12.4 */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +/* If kernel is older than 4.12 but distro is RHEL >= 7.5 || SLES > 12SP4, + * it does have support for MIN_NAPI_ID + */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))) +#define HAVE_MIN_NAPI_ID +#endif /* RHEL >= 7.5 || SLES >= 12.4 */ +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_MIN_NAPI_ID +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) + +#if(!defined(KYLIN_KERNEL44)) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), 
(d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif + +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
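+ *
+ * Illustrative use of the compat macros below (hypothetical driver code);
+ * the same calls compile unchanged against the real ETHTOOL_GLINKSETTINGS
+ * API on newer kernels:
+ *
+ *	struct ethtool_link_ksettings ks;
+ *
+ *	ethtool_link_ksettings_zero_link_mode(&ks, supported);
+ *	ethtool_link_ksettings_add_link_mode(&ks, supported, Autoneg);
+ *	ethtool_link_ksettings_add_link_mode(&ks, supported, 10000baseT_Full);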
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
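+ *
+ * Illustrative wrapper (hypothetical names such as foo_get_ksettings are
+ * assumptions, not part of this patch):
+ *
+ *	static int foo_get_settings(struct net_device *netdev,
+ *				    struct ethtool_cmd *cmd)
+ *	{
+ *		struct ethtool_link_ksettings ks;
+ *
+ *		foo_get_ksettings(netdev, &ks);
+ *		_kc_ethtool_ksettings_to_cmd(&ks, cmd);
+ *		return 0;
+ *	}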
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
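+ *
+ * Illustrative caller of the array_index_nospec() macro defined below (not
+ * part of the original source): after a bounds check such as
+ *
+ *	if (idx >= size)
+ *		return -EINVAL;
+ *	idx = array_index_nospec(idx, size);
+ *
+ * the clamped index can safely be used to index an array under speculation.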
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +/* add a check for the Oracle UEK 4.14.35 kernel as + * it backported a version of this bitmap function + */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(4,14,35)) +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#include "kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct +#endif /* SLES < 15.1 */ + +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + 
ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open + +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) || defined(KYLIN_KERNEL)) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 
5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#endif /* RHEL < 8.1 */ +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (!defined KYLIN_KERNEL) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ +#endif /*!defined KYLIN_KERNEL*/ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* < 6.2.0 */ +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); + +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else 
/* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#else /* >= 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +/* add a check for the Oracle UEK 5.4.17 kernel which + * backported the rename of the aer functions + */ +#if defined(NEED_ORCL_LIN_PCI_AER_CLEAR_NONFATAL_STATUS) || \ +!(SLE_VERSION_CODE > SLE_VERSION(15, 2, 0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ +(SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14, 0, 0))) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(5,4,17)) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +/* (RHEL < 8.4) || (SLE < 
15.3) */ +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#elif (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +/* RHEL >= 8.4 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#endif +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#endif /* (RHEL >= 8.4) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,3))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#ifdef HAVE_NAPI_BUSY_LOOP +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +static inline void +_kc_napi_busy_loop(unsigned int napi_id, + bool (*loop_end)(void *, unsigned long), void *loop_end_arg, + bool __always_unused prefer_busy_poll, + u16 __always_unused budget) +{ + napi_busy_loop(napi_id, loop_end, loop_end_arg); +} + +#define napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, budget) \ + _kc_napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, budget) +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* HAVE_NAPI_BUSY_LOOP */ +#endif /* <5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0)) +#define HAVE_GRO_HEADER +#endif /* >=5.12.0 */ + +/*****************************************************************************/ + +/* + * Load the implementations file which actually defines kcompat backports. 
+ * Legacy backports still exist in this file, but all new backports must be + * implemented using kcompat_*defs.h and kcompat_impl.h + */ +#include "kcompat_impl.h" + +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_defs.h b/drivers/net/ethernet/guangruntong/kcompat_defs.h new file mode 100755 index 00000000000000..6594358eef8210 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_defs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_DEFS_H_ +#define _KCOMPAT_DEFS_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#endif /* LINUX_VERSION_CODE */ + +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) //sam +#include +#else +#include +#endif +#endif + +/* + * Include the definitions file for HAVE/NEED flags for the standard upstream + * kernels. + * + * Then, based on the distribution we detect, load the distribution specific + * definitions file that customizes the definitions for the target + * distribution. + */ +#include "kcompat_std_defs.h" + +#ifdef CONFIG_SUSE_KERNEL +#include "kcompat_sles_defs.h" +#elif UBUNTU_VERSION_CODE +#include "kcompat_ubuntu_defs.h" +#elif RHEL_RELEASE_CODE +#include "kcompat_rhel_defs.h" +#elif defined(UEK_RELEASE_NUMBER) +#include "kcompat_oracle_defs.h" +#endif + +#endif /* _KCOMPAT_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_gcc.h b/drivers/net/ethernet/guangruntong/kcompat_gcc.h new file mode 100755 index 00000000000000..a619fa3628c4d8 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_gcc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2021 Intel Corporation. 
*/ + +#ifndef _KCOMPAT_GCC_H_ +#define _KCOMPAT_GCC_H_ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute(fallthrough) */ +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute */ + +#endif /* _KCOMPAT_GCC_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h b/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h new file mode 100644 index 00000000000000..e8abd5d1cdfb6a --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h @@ -0,0 +1,58 @@ +/* Autogenerated for KSRC=/lib/modules/6.6.7-amd64-desktop-hwe/build via kcompat-generator.sh */ +#ifndef _KCOMPAT_GENERATED_DEFS_H_ +#define _KCOMPAT_GENERATED_DEFS_H_ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_HEALTH +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_PARAMS +#define HAVE_DEVLINK_PORT_NEW +#define HAVE_DEVLINK_PORT_OPS +#define HAVE_DEVLINK_PORT_SPLIT +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#define HAVE_DEVLINK_PORT_SPLIT_IN_PORT_OPS +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVLINK_RATE_NODE_CREATE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVL_HEALTH_REPORTER_DESTROY +#define HAVE_DEVL_PORT_REGISTER +#define HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_NDO_FDB_DEL_EXTACK +#define HAVE_NETDEV_MIN_MAX_MTU +#define HAVE_NETIF_SET_TSO_MAX +#define HAVE_SET_NETDEV_DEVLINK_PORT +#define HAVE_INCLUDE_BITFIELD +#define NEED_DPLL_NETDEV_PIN_SET +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_FLOW_RSS +#define HAVE_FLOW_DISSECTOR_KEY_PPPOE +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#define HAVE_CDEV_DEVICE +#define HAVE_DEV_UEVENT_CONST +#define HAVE_STREAM_OPEN +#define HAVE_GNSS_MODULE +#define HAVE_POLL_T +#define HAVE_PCI_MSIX_ALLOC_IRQ_AT +#define HAVE_PCI_MSIX_CAN_ALLOC_DYN +#define HAVE_PCI_MSIX_FREE_IRQ +#define HAVE_PER_VF_MSIX_SYSFS +#define HAVE_STRUCT_PCI_DEV_PTM_ENABLED +#define HAVE_STDDEF_OFFSETTOEND +#define HAVE_X86_STEPPING +#define HAVE_COMPLETION_RAW_SPINLOCK +#define HAVE_HWMON_DEVICE_REGISTER_WITH_INFO +#define HAVE_STRUCT_STATIC_KEY_FALSE +#define HAVE_MDEV_REGISTER_PARENT +#define HAVE_VM_FLAGS_API +#define HAVE_NL_SET_ERR_MSG_FMT +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#define NEED_SCHED_PARAM +#define HAVE_TRACE_ENABLED_SUPPORT +#define HAVE_TTY_OP_WRITE_SIZE_T +#endif /* _KCOMPAT_GENERATED_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_impl.h b/drivers/net/ethernet/guangruntong/kcompat_impl.h new file mode 100755 index 00000000000000..2700008ceab71e --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_impl.h @@ -0,0 +1,2714 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2024 Intel Corporation */ + +#ifndef _KCOMPAT_IMPL_H_ +#define _KCOMPAT_IMPL_H_ + +/* devlink support */ +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +/* + * This change is adding buffer in enum value for ice_devlink_param_id. + * + * In upstream / OOT compiled from source it is safe to use + * DEVLINK_PARAM_GENERIC_ID_MAX as first value for ice_devlink_param_id + * enum. 
+ * + * In case of binary release (for Secure Boot purpose) this caused issue + * with supporting multiple kernels because backport made by SLES changed + * value of DEVLINK_PARAM_GENERIC_ID_MAX. This caused -EINVAL to + * be returned by devlink_params_register() because + * ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV (compiled on older kernel) was equal + * to DEVLINK_PARAM_GENERIC_ID_MAX (in newer kernel). + */ +#define DEVLINK_PARAM_GENERIC_ID_MAX __KC_DEVLINK_PARAM_GENERIC_ID_MAX +#include +#undef DEVLINK_PARAM_GENERIC_ID_MAX +#define DEVLINK_PARAM_GENERIC_ID_MAX (__KC_DEVLINK_PARAM_GENERIC_ID_MAX + 32) +#endif /* CONFIG_DEVLINK */ + +/* This file contains implementations of backports from various kernels. It + * must rely only on NEED_ and HAVE_ checks. It must not make any + * checks to determine the kernel version when deciding whether to include an + * implementation. + * + * All new implementations must go in this file, and legacy implementations + * should be migrated to the new format over time. + */ + +/* The same kcompat code is used here and auxiliary module. To avoid + * duplication and functions redefitions in some scenarios, include the + * auxiliary kcompat implementation here. + */ +#include "auxiliary_compat.h" + +/* generic network stack functions */ + +/* NEED_NETDEV_TXQ_BQL_PREFETCH + * + * functions + * netdev_txq_bql_complete_prefetchw() + * netdev_txq_bql_enqueue_prefetchw() + * + * were added in kernel 4.20 upstream commit + * 535114539bb2 ("net: add netdev_txq_bql_{enqueue, complete}_prefetchw() + * helpers") + */ +#ifdef NEED_NETDEV_TXQ_BQL_PREFETCH +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} +#endif /* NEED_NETDEV_TXQ_BQL_PREFETCH */ + +/* NEED_NETDEV_TX_SENT_QUEUE + * + * __netdev_tx_sent_queue was added in kernel 4.20 upstream commit + * 3e59020abf0f ("net: bql: add __netdev_tx_sent_queue()") + */ +#ifdef NEED_NETDEV_TX_SENT_QUEUE +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. + */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif /* NEED_NETDEV_TX_SENT_QUEUE */ + +/* NEED_NET_PREFETCH + * + * net_prefetch was introduced by commit f468f21b7af0 ("net: Take common + * prefetch code structure into a function") + * + * This function is trivial to re-implement in full. 
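+ *
+ * Illustrative call site (hypothetical names, not from this patch): an Rx
+ * cleanup routine may prefetch the start of a received buffer before it
+ * parses the headers, e.g.
+ *
+ *	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ *
+ *	net_prefetch(va);
+ *	skb = napi_alloc_skb(napi, hdr_len);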
+ */ +#ifdef NEED_NET_PREFETCH +static inline void net_prefetch(void *p) +{ + prefetch(p); +#if L1_CACHE_BYTES < 128 + prefetch((u8 *)p + L1_CACHE_BYTES); +#endif +} +#endif /* NEED_NET_PREFETCH */ + +/* NEED_SKB_FRAG_OFF and NEED_SKB_FRAG_OFF_ADD + * + * skb_frag_off and skb_frag_off_add were added in upstream commit + * 7240b60c98d6 ("linux: Add skb_frag_t page_offset accessors") + * + * Implementing the wrappers directly for older kernels which still have the + * old implementation of skb_frag_t is trivial. + * + * LTS 4.19 backported the define for skb_frag_off in 4.19.201. + * d94d95ae0dd0 ("gro: ensure frag0 meets IP header alignment") + * Need to exclude defining skb_frag_off for 4.19.X where X > 200 + */ +#ifdef NEED_SKB_FRAG_OFF +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif /* NEED_SKB_FRAG_OFF */ +#ifdef NEED_SKB_FRAG_OFF_ADD +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif /* NEED_SKB_FRAG_OFF_ADD */ + +/* + * NEED_DMA_ATTRS, NEED_DMA_ATTRS_PTR and related functions + * + * dma_map_page_attrs and dma_unmap_page_attrs were added in upstream commit + * 0495c3d36794 ("dma: add calls for dma_map_page_attrs and + * dma_unmap_page_attrs") + * + * Implementing these calls in this way makes RHEL7.4 compile (which doesn't + * have these) and all newer kernels compile fine, while using only the new + * calls with no ifdeffery. + * + * In kernel 4.10 the commit ("dma-mapping: use unsigned long for dma_attrs") + * switched the argument from struct dma_attrs * to unsigned long. + * + * __page_frag_cache_drain was implemented in 2017, but __page_frag_drain came + * with the above series for _attrs, and seems to have been backported at the + * same time. + * + * Please note SLES12.SP3 and RHEL7.5 and newer have all three of these + * already. + * + * If need be in the future for some reason, we could make a separate NEED_ + * define for __page_frag_cache_drain, but not yet. 
+ * + * For clarity: there are three states: + * 1) no attrs + * 2) attrs but with a pointer to a struct dma_attrs + * 3) attrs but with unsigned long type + */ +#ifdef NEED_DMA_ATTRS +static inline +dma_addr_t __kc_dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#define dma_map_page_attrs __kc_dma_map_page_attrs + +static inline +void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + WARN_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#elif defined(NEED_DMA_ATTRS_PTR) +static inline +dma_addr_t __kc_dma_map_page_attrs_long(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_attrs dmaattrs = {}; + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &dmaattrs); + + if (attrs & DMA_ATTR_WEAK_ORDERING) + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &dmaattrs); + + return dma_map_page_attrs(dev, page, offset, size, dir, &dmaattrs); +} +#define dma_map_page_attrs __kc_dma_map_page_attrs_long +/* there is a nasty macro buried in dma-mapping.h which reroutes dma_map_page + * and dma_unmap_page to attribute versions, so take control of that macro and + * fix it here. */ +#ifdef dma_map_page +#undef dma_map_page +#define dma_map_page(a,b,c,d,r) dma_map_page_attrs(a,b,c,d,r,0) +#endif + +static inline +void __kc_dma_unmap_page_attrs_long(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_attrs dmaattrs = {}; + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &dmaattrs); + + if (attrs & DMA_ATTR_WEAK_ORDERING) + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &dmaattrs); + + dma_unmap_page_attrs(dev, addr, size, dir, &dmaattrs); +} +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs_long +#ifdef dma_unmap_page +#undef dma_unmap_page +#define dma_unmap_page(a,b,c,r) dma_unmap_page_attrs(a,b,c,r,0) +#endif +#endif /* NEED_DMA_ATTRS_PTR */ + +/* + * NETIF_F_HW_L2FW_DOFFLOAD related functions + * + * Support for NETIF_F_HW_L2FW_DOFFLOAD was first introduced upstream by + * commit a6cc0cfa72e0 ("net: Add layer 2 hardware acceleration operations for + * macvlan devices") + */ +#ifdef NETIF_F_HW_L2FW_DOFFLOAD + +#include + +/* NEED_MACVLAN_ACCEL_PRIV + * + * macvlan_accel_priv is an accessor function that replaced direct access to + * the macvlan->fwd_priv variable. It was introduced in commit 7d775f63470c + * ("macvlan: Rename fwd_priv to accel_priv and add accessor function") + * + * Implement the new wrapper name by simply accessing the older + * macvlan->fwd_priv name. 
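+ *
+ * Offload-aware transmit paths can then use the accessor uniformly, e.g.
+ * (illustrative; the forwarding ring lookup is hypothetical):
+ *
+ *     if (netif_is_macvlan(skb->dev))
+ *             fwd_ring = macvlan_accel_priv(skb->dev);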
+ */ +#ifdef NEED_MACVLAN_ACCEL_PRIV +static inline void *macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->fwd_priv; +} +#endif /* NEED_MACVLAN_ACCEL_PRIV */ + +/* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD + * + * macvlan_release_l2fw_offload was introduced upstream by commit 53cd4d8e4dfb + * ("macvlan: Provide function for interfaces to release HW offload") + * + * Implementing this is straight forward, but we must be careful to use + * fwd_priv instead of accel_priv. Note that both the change to accel_priv and + * introduction of this function happened in the same release. + */ +#ifdef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +static inline int macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + macvlan->fwd_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); +} +#endif /* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD */ + +/* NEED_MACVLAN_SUPPORTS_DEST_FILTER + * + * macvlan_supports_dest_filter was introduced upstream by commit 6cb1937d4eff + * ("macvlan: Add function to test for destination filtering support") + * + * The implementation doesn't rely on anything new and is trivial to backport + * for kernels that have NETIF_F_HW_L2FW_DOFFLOAD support. + */ +#ifdef NEED_MACVLAN_SUPPORTS_DEST_FILTER +static inline bool macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif /* NEED_MACVLAN_SUPPORTS_DEST_FILTER */ + +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ + +/* tc functions */ + +/* NEED_FLOW_INDR_BLOCK_CB_REGISTER + * + * __flow_indr_block_cb_register and __flow_indr_block_cb_unregister were + * added in upstream commit 4e481908c51b ("flow_offload: move tc indirect + * block to flow offload") + * + * This was a simple rename so we can just translate from the old + * naming scheme with a macro. + */ +#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif + +/* devlink support */ +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +#ifdef HAVE_DEVLINK_REGIONS +/* NEED_DEVLINK_REGION_CREATE_OPS + * + * The ops parameter to devlink_region_create was added by commit e8937681797c + * ("devlink: prepare to support region operations") + * + * For older kernels, define _kc_devlink_region_create that takes an ops + * parameter, and calls the old implementation function by extracting the name + * from the structure. + */ +#ifdef NEED_DEVLINK_REGION_CREATE_OPS +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); +}; + +static inline struct devlink_region * +_kc_devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size) +{ + return devlink_region_create(devlink, ops->name, region_max_snapshots, + region_size); +} + +#define devlink_region_create _kc_devlink_region_create +#endif /* NEED_DEVLINK_REGION_CREATE_OPS */ +#endif /* HAVE_DEVLINK_REGIONS */ + +/* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY + * + * devlink_flash_update_status_notify, _begin_notify, and _end_notify were + * added by upstream commit 191ed2024de9 ("devlink: allow driver to update + * progress of flash update") + * + * For older kernels that lack the netlink messages, convert the functions + * into no-ops. 
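+ *
+ * A hedged sketch of the intended call pattern in a flash handler
+ * (write_block(), the component name and the block count are made up):
+ *
+ *     devlink_flash_update_begin_notify(devlink);
+ *     for (i = 0; i < blocks; i++) {
+ *             write_block(i);
+ *             devlink_flash_update_status_notify(devlink, "Flashing",
+ *                                                "fw.mgmt", i + 1, blocks);
+ *     }
+ *     devlink_flash_update_end_notify(devlink);
+ *
+ * On kernels without the netlink support all three calls become no-ops.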
+ */ +#ifdef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif /* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY */ + +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; + +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */ + +/* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY + * + * devlink_flash_update_timeout_notify was added by upstream commit + * f92970c694b3 ("devlink: add timeout information to status_notify"). + * + * For older kernels, just convert timeout notifications into regular status + * notification messages without timeout information. + */ +#ifdef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +static inline void +devlink_flash_update_timeout_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long __always_unused timeout) +{ + devlink_flash_update_status_notify(devlink, status_msg, component, 0, 0); +} +#endif /* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY */ + +/* NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER + * + * Upstream commit ba7d16c77942 ("devlink: Implicitly set auto recover flag when + * registering health reporter") removed auto_recover param. + * CORE code does not need to bother about this param, we could simply provide + * it via compat. + */ +#ifdef NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +static inline struct devlink_health_reporter * +_kc_devlink_health_reporter_create(struct devlink *devlink, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv) +{ + return devlink_health_reporter_create(devlink, ops, graceful_period, + !!ops->recover, priv); +} +#define devlink_health_reporter_create _kc_devlink_health_reporter_create +#endif /* NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER */ + +/* + * NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + * + * HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR + * HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID + * + * devlink_port_attrs_set was introduced by commit b9ffcbaf56d3 ("devlink: + * introduce devlink_port_attrs_set") + * + * It's function signature has changed multiple times over several kernel + * releases: + * + * commit 5ec1380a21bb ("devlink: extend attrs_set for setting port + * flavours") added the ability to set port flavour. (Note that there is no + * official kernel release with devlink_port_attrs_set without the flavour + * argument, as they were introduced in the same series.) + * + * commit bec5267cded2 ("net: devlink: extend port attrs for switch ID") added + * the ability to set the switch ID (HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + * + * Finally commit 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set + * parameters with a struct") refactored to pass devlink_port_attrs struct + * instead of individual parameters. 
(!NEED_DEVLINK_PORT_ATTRS_SET_STRUCT) + * + * We want core drivers to just use the latest form that takes + * a devlink_port_attrs structure. Note that this structure did exist as part + * of but was never used directly by driver code prior to the + * function parameter change. For this reason, the implementation always + * relies on _kc_devlink_port_attrs instead of what was defined in the kernel. + */ +#ifdef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + +#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif + +struct _kc_devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct _kc_devlink_port_pci_pf_attrs { + u16 pf; +}; + +struct _kc_devlink_port_pci_vf_attrs { + u16 pf; + u16 vf; +}; + +struct _kc_devlink_port_attrs { + u8 split:1, + splittable:1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct _kc_devlink_port_phys_attrs phys; + struct _kc_devlink_port_pci_pf_attrs pci_pf; + struct _kc_devlink_port_pci_vf_attrs pci_vf; + }; +}; + +#define devlink_port_attrs _kc_devlink_port_attrs + +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ +#if defined(HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + devlink_port_attrs_set(devlink_port, attrs->flavour, attrs->phys.port_number, + attrs->split, attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +#elif defined(HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR) + devlink_port_attrs_set(devlink_port, attrs->flavour, attrs->phys.port_number, + attrs->split, attrs->phys.split_subport_number); +#else + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +#endif +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set + +#endif /* NEED_DEVLINK_PORT_ATTRS_SET_STRUCT */ + +/* + * NEED_DEVLINK_ALLOC_SETS_DEV + * + * Since commit 919d13a7e455 ("devlink: Set device as early as possible"), the + * devlink device pointer is set by devlink_alloc instead of by + * devlink_register. + * + * devlink_alloc now includes the device pointer in its signature, while + * devlink_register no longer includes it. + * + * This implementation provides a replacement for devlink_alloc which will + * take and then silently discard the extra dev pointer. + * + * To use devlink_register, drivers must check + * HAVE_DEVLINK_REGISTER_SETS_DEV. Note that we can't easily provide + * a backport of the change to devlink_register directly. Although the dev + * pointer is accessible from the devlink pointer through the driver private + * section, it is device driver specific and is not easily accessible in + * compat code. 
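+ *
+ * Probe code can then be written against the new calling convention only,
+ * e.g. (sketch; my_devlink_ops and struct my_pf are placeholders):
+ *
+ *     devlink = devlink_alloc(&my_devlink_ops, sizeof(struct my_pf),
+ *                             &pdev->dev);
+ *     if (!devlink)
+ *             return -ENOMEM;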
+ */ +#ifdef NEED_DEVLINK_ALLOC_SETS_DEV +static inline struct devlink * +_kc_devlink_alloc(const struct devlink_ops *ops, size_t priv_size, + struct device * __always_unused dev) +{ + return devlink_alloc(ops, priv_size); +} +#define devlink_alloc _kc_devlink_alloc +#endif /* NEED_DEVLINK_ALLOC_SETS_DEV */ + +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#ifdef NEED_DEVLINK_UNLOCKED_RESOURCE +/* + * NEED_DEVLINK_UNLOCKED_RESOURCE + * + * Handle devlink API change introduced in: + * c223d6a4bf ("net: devlink: add unlocked variants of devlink_resource*() + * functions") + * 644a66c60f ("net: devlink: convert reload command to take implicit + * devlink->lock") + * + * devl_resource_size_get() does not take devlink->lock where + * devlink_resource_size_get() takes devlink->lock, but we do not introduce + * locking in the driver as taking the lock in devlink_reload() was added + * upstream in the same version as API change. + * + * We have to rely on distro maintainers properly backporting of both mentioned + * commits for OOT driver to work properly. + * In case of backporting only c223d6a4bf assert inside + * devl_resource_size_get() will trigger kernel WARNING, + * In case of backporting only 644a66c60f devlink_resource_size_get() will + * attempt to take the lock second time. + */ +static inline int devl_resource_size_get(struct devlink *devlink, + u64 resource_id, + u64 *p_resource_size) +{ + return devlink_resource_size_get(devlink, resource_id, p_resource_size); +} +#endif /* NEED_DEVLINK_UNLOCKED_RESOURCE */ + +#ifdef NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE +/* + * NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE + * + * Commit 4c897cfc46 ("devlink: Simplify devlink resources unregister call") + * removed struct devlink_resource *resource parameter from + * devlink_resources_unregister() function, if NULL is passed as a resource + * parameter old version of devlink_resources_unregister() behaves the same + * way as new implementation removing all resources from: + * &devlink->resource_list. + */ +static inline void +_kc_devlink_resources_unregister(struct devlink *devlink) +{ + return devlink_resources_unregister(devlink, NULL); +} + +#define devlink_resources_unregister _kc_devlink_resources_unregister +#endif /* NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE */ +#endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT */ + +#ifdef NEED_DEVLINK_TO_DEV +/* + * Commit 2131463 ("devlink: Reduce struct devlink exposure") + * removed devlink struct fields from header to avoid exposure + * and added devlink_to_dev and related functions to access + * them instead. + */ +static inline struct device * +devlink_to_dev(const struct devlink *devlink) +{ + return devlink->dev; +} +#endif /* NEED_DEVLINK_TO_DEV */ + +#endif /* CONFIG_NET_DEVLINK */ + +#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +/* ida_alloc(), ida_alloc_min(), ida_alloc_max(), ida_alloc_range(), and + * ida_free() were added in commit 5ade60dda43c ("ida: add new API"). + * + * Also, using "0" as the "end" argument (3rd argument) to ida_simple_get() is + * considered the max value, which is why it's used in ida_alloc() and + * ida_alloc_min(). 
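+ *
+ * Callers can then stick to the new API on every kernel, e.g. (illustrative):
+ *
+ *     id = ida_alloc_range(&my_ida, 1, 127, GFP_KERNEL);
+ *     if (id < 0)
+ *             return id;
+ *     ...
+ *     ida_free(&my_ida, id);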
+ */ +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_simple_get(ida, 0, 0, gfp); +} + +static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) +{ + return ida_simple_get(ida, min, 0, gfp); +} + +static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, 0, max, gfp); +} + +static inline int +ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, min, max, gfp); +} + +static inline void ida_free(struct ida *ida, unsigned int id) +{ + ida_simple_remove(ida, id); +} +#endif /* NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE */ + +/* dev_printk implementations */ + +/* NEED_DEV_LEVEL_ONCE + * + * The dev_*_once family of printk functions was introduced by commit + * e135303bd5be ("device: Add dev__once variants") + * + * The implementation is very straight forward so we will just implement them + * as-is here. + * + * Note that this assumes all dev_*_once macros exist if dev_level_once was + * found. + */ +#ifdef NEED_DEV_LEVEL_ONCE +#ifdef CONFIG_PRINTK +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) +#else +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + if (0) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#define dev_emerg_once(dev, fmt, ...) \ + dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_once(dev, fmt, ...) \ + dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_once(dev, fmt, ...) \ + dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_once(dev, fmt, ...) \ + dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_once(dev, fmt, ...) \ + dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_once(dev, fmt, ...) \ + dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) +#define dev_dbg_once(dev, fmt, ...) \ + dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) +#endif /* NEED_DEV_LEVEL_ONCE */ + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO + +/* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 + * + * tc_cls_can_offload_and_chain0 was added by upstream commit + * 878db9f0f26d ("pkt_cls: add new tc cls helper to check offload flag and + * chain index"). + * + * This patch backports this function for older kernels by calling + * tc_can_offload() directly. + */ +#ifdef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#include +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif /* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 */ +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + +/* NEED_TC_SETUP_QDISC_MQPRIO + * + * TC_SETUP_QDISC_MQPRIO was added by upstream commit + * 575ed7d39e2f ("net_sch: mqprio: Change TC_SETUP_MQPRIO to + * TC_SETUP_QDISC_MQPRIO"). 
+ * + * For older kernels which are using TC_SETUP_MQPRIO + */ +#ifdef NEED_TC_SETUP_QDISC_MQPRIO +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* NEED_TC_SETUP_QDISC_MQPRIO */ + +/* ART/TSC functions */ +#ifdef HAVE_PTP_CROSSTIMESTAMP +/* NEED_CONVERT_ART_NS_TO_TSC + * + * convert_art_ns_to_tsc was added by upstream commit fc804f65d462 ("x86/tsc: + * Convert ART in nanoseconds to TSC"). + * + * This function is similar to convert_art_to_tsc, but expects the input in + * terms of nanoseconds, rather than ART cycles. We implement this by + * accessing the tsc_khz value and performing the proper calculation. In order + * to access the correct clock object on returning, we use the function + * convert_art_to_tsc, because the art_related_clocksource is inaccessible. + */ +#if defined(CONFIG_X86) && defined(NEED_CONVERT_ART_NS_TO_TSC) +#include + +static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) +{ + struct system_counterval_t system; + u64 tmp, res, rem; + + rem = do_div(art_ns, USEC_PER_SEC); + + res = art_ns * tsc_khz; + tmp = rem * tsc_khz; + + do_div(tmp, USEC_PER_SEC); + res += tmp; + + system = convert_art_to_tsc(art_ns); + system.cycles = res; + + return system; +} +#endif /* CONFIG_X86 && NEED_CONVERT_ART_NS_TO_TSC */ +#endif /* HAVE_PTP_CROSSTIMESTAMP */ + +/* PTP functions and definitions */ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#include +#include + +/* PTP_* ioctl flags + * + * PTP_PEROUT_ONE_SHOT and PTP_PEROUT_DUTY_CYCLE were added by commit + * f65b71aa25a6 ("ptp: add ability to configure duty cycle for periodic + * output") + * + * PTP_PEROUT_PHASE was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") + * + * PTP_STRICT_FLAGS was added in commit 6138e687c7b6 ("ptp: Introduce strict + * checking of external time stamp options.") + * + * These flags control behavior for the periodic output PTP ioctl. For older + * kernels, we define the flags as 0. This allows bitmask checks on flags to + * work as expected, since these feature flags will become no-ops on kernels + * that lack support. + * + * Drivers can check if the relevant feature is actually supported by using an + * '#if' on the flag instead of an '#ifdef' + */ +#ifndef PTP_PEROUT_PHASE +#define PTP_PEROUT_PHASE 0 +#endif + +#ifndef PTP_PEROUT_DUTY_CYCLE +#define PTP_PEROUT_DUTY_CYCLE 0 +#endif + +#ifndef PTP_STRICT_FLAGS +#define PTP_STRICT_FLAGS 0 +#endif + +#ifndef PTP_PEROUT_PHASE +/* PTP_PEROUT_PHASE + * + * The PTP_PEROUT_PHASE flag was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") as a way for userspace to + * request a phase-offset periodic output that starts on some arbitrary + * multiple of the clock period. + * + * For older kernels, define this flag to 0 so that checks for if it is + * enabled will always fail. Drivers should use '#if PTP_PEROUT_PHASE' to + * determine if the kernel has phase support, and use the flag as normal for + * checking supported flags or if the flag is enabled for a given request. + */ +#define PTP_PEROUT_PHASE 0 +#endif + +#endif /* CONFIG_PTP_1588_CLOCK */ + +/* + * NEED_PTP_SYSTEM_TIMESTAMP + * + * Upstream commit 361800876f80 ("ptp: add PTP_SYS_OFFSET_EXTENDED + * ioctl") introduces new ioctl, driver and helper functions. 
+ * + * Required for PhotonOS 3.0 to correctly support backport of + * PTP patches introduced in Linux Kernel version 5.0 on 4.x kernels + */ +#ifdef NEED_PTP_SYSTEM_TIMESTAMP +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp *sts) { } + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp *sts) { } +#endif /* !NEED_PTP_SYSTEM_TIMESTAMP */ + +#ifdef NEED_PTP_CLASSIFY_RAW +/* NEED_PTP_CLASSIFY_RAW + * + * The ptp_classify_raw() function was introduced into + * as part of commit 164d8c666521 ("net: ptp: do not reimplement PTP/BPF + * classifier"). + * + * The kernel does provide the classifier BPF program since commit + * 15f0127d1d18 ("net: added a BPF to help drivers detect PTP packets."). + * However, it requires initializing the BPF filter properly and that varies + * depending on the kernel version. + * + * The only current uses for this function in our drivers is to enhance + * debugging messages. Rather than re-implementing the function just return + * PTP_CLASS_NONE indicating that it could not identify any PTP frame. + */ +#include + +static inline unsigned int ptp_classify_raw(struct sk_buff *skb) +{ + return PTP_CLASS_NONE; +} +#endif /* NEED_PTP_CLASSIFY_RAW */ + +#ifdef NEED_PTP_PARSE_HEADER +/* NEED_PTP_PARSE_HEADER + * + * The ptp_parse_header() function was introduced upstream in commit + * bdfbb63c314a ("ptp: Add generic ptp v2 header parsing function"). + * + * Since it is straight forward to implement, do so. + */ +#include + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; /* transportSpecific | messageType */ + u8 ver; /* reserved | versionPTP */ + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __packed; + +static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb, + unsigned int type) +{ +#if defined(CONFIG_NET_PTP_CLASSIFY) + u8 *ptr = skb_mac_header(skb); + + if (type & PTP_CLASS_VLAN) + ptr += VLAN_HLEN; + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_IPV4: + ptr += IPV4_HLEN(ptr) + UDP_HLEN; + break; + case PTP_CLASS_IPV6: + ptr += IP6_HLEN + UDP_HLEN; + break; + case PTP_CLASS_L2: + break; + default: + return NULL; + } + + ptr += ETH_HLEN; + + /* Ensure that the entire header is present in this packet. */ + if (ptr + sizeof(struct ptp_header) > skb->data + skb->len) + return NULL; + + return (struct ptp_header *)ptr; +#else + return NULL; +#endif +} +#endif /* NEED_PTP_PARSE_HEADER */ + +#ifdef NEED_CPU_LATENCY_QOS_RENAME +/* NEED_CPU_LATENCY_QOS_RENAME + * + * The PM_QOS_CPU_DMA_LATENCY definition was removed in 67b06ba01857 ("PM: + * QoS: Drop PM_QOS_CPU_DMA_LATENCY and rename related functions"). The + * related functions were renamed to use "cpu_latency_qos_" prefix. + * + * Use wrapper functions to map the new API onto the API available in older + * kernels. 
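+ *
+ * Callers then use only the new names, e.g. (sketch; the request object is a
+ * hypothetical driver field):
+ *
+ *     cpu_latency_qos_add_request(&adapter->pm_qos_req, 0);
+ *     ...
+ *     cpu_latency_qos_remove_request(&adapter->pm_qos_req);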
+ */ +#include +static inline void +cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value) +{ + pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, value); +} + +static inline void +cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value) +{ + pm_qos_update_request(req, new_value); +} + +static inline void +cpu_latency_qos_remove_request(struct pm_qos_request *req) +{ + pm_qos_remove_request(req); +} +#endif /* NEED_CPU_LATENCY_QOS_RENAME */ + +#ifdef NEED_DECLARE_STATIC_KEY_FALSE +/* NEED_DECLARE_STATIC_KEY_FALSE + * + * DECLARE_STATIC_KEY_FALSE was added by upstream commit b8fb03785d4d + * ("locking/static_keys: Provide DECLARE and well as DEFINE macros") + * + * The definition is now necessary to handle the xdpdrv work with more than 64 + * cpus + */ +#ifdef HAVE_STRUCT_STATIC_KEY_FALSE +#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key_false name +#else +#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key name +#endif /* HAVE_STRUCT_STATIC_KEY_FALSE */ +#endif /* NEED_DECLARE_STATIC_KEY_FALSE */ + +#ifdef NEED_DEFINE_STATIC_KEY_FALSE +/* NEED_DEFINE_STATIC_KEY_FALSE + * + * DEFINE_STATIC_KEY_FALSE was added by upstream commit 11276d5306b8 + * ("locking/static_keys: Add a new static_key interface") + * + * The definition is now necessary to handle the xdpdrv work with more than 64 + * cpus + */ +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key name = STATIC_KEY_INIT_FALSE +#endif /* NEED_DEFINE_STATIC_KEY_FALSE */ + +#ifdef NEED_STATIC_BRANCH_LIKELY +/* NEED_STATIC_BRANCH_LIKELY + * + * static_branch_likely, static_branch_unlikely, + * static_branch_inc, static_branch_dec was added by upstream commit + * 11276d5306b8 ("locking/static_keys: Add a new + * static_key interface") + * + * The definition is now necessary to handle the xdpdrv work with more than 64 + * cpus + * + * Note that we include all four definitions if static_branch_likely cannot be + * found in . + */ +#define static_branch_likely(x) likely(static_key_enabled(x)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(x)) + +#define static_branch_inc(x) static_key_slow_inc(x) +#define static_branch_dec(x) static_key_slow_dec(x) + +#endif /* NEED_STATIC_BRANCH_LIKELY */ + +/* PCI related stuff */ + +/* NEED_PCI_AER_CLEAR_NONFATAL_STATUS + * + * 894020fdd88c ("PCI/AER: Rationalize error status register clearing") has + * renamed pci_cleanup_aer_uncorrect_error_status to more sane name. + */ +#ifdef NEED_PCI_AER_CLEAR_NONFATAL_STATUS +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif /* NEED_PCI_AER_CLEAR_NONFATAL_STATUS */ + +#ifdef NEED_NETDEV_XDP_STRUCT +#define netdev_bpf netdev_xdp +#endif /* NEED_NETDEV_XDP_STRUCT */ + +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include +static inline void +_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + _kc_bpf_warn_invalid_xdp_action(dev, prog, act) +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NETDEV_PROG_XDP_WARN_ACTION */ + +/* NEED_ETH_HW_ADDR_SET + * + * eth_hw_addr_set was added by upstream commit + * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers") + * + * Using eth_hw_addr_set became required in 5.17, when the dev_addr field in + * the netdev struct was constified. 
See 48eab831ae8b ("net: create + * netdev->dev_addr assignment helpers") + */ +#ifdef NEED_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* NEED_ETH_HW_ADDR_SET */ + +#ifdef NEED_JIFFIES_64_TIME_IS_MACROS +/* NEED_JIFFIES_64_TIME_IS_MACROS + * + * The jiffies64 time_is_* macros were introduced upstream by 3740dcdf8a77 + * ("jiffies: add time comparison functions for 64 bit jiffies") in Linux 4.9. + * + * Support for 64-bit jiffies has been available since the initial import of + * Linux into git in 2005, so its safe to just implement the macros as-is + * here. + */ +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) +#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) +#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) +#endif /* NEED_JIFFIES_64_TIME_IS_MACROS */ + +#ifdef NEED_INDIRECT_CALL_WRAPPER_MACROS +/* NEED_INDIRECT_CALL_WRAPPER_MACROS + * + * The INDIRECT_CALL_* macros were introduced upstream as upstream commit + * 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls + * of builtin") which landed in Linux 5.0 + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ \ + likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ + }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_WRAPPER_MACROS */ + +#ifdef NEED_INDIRECT_CALL_3_AND_4 +/* NEED_INDIRECT_CALL_3_AND_4 + * Support for the 3 and 4 call variants was added in upstream commit + * e678e9ddea96 ("indirect_call_wrapper: extend indirect wrapper to support up + * to 4 calls") + * + * These are easy to implement directly. + */ + +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_3_AND_4 */ + +#ifdef NEED_EXPORT_INDIRECT_CALLABLE +/* NEED_EXPORT_INDIRECT_CALLABLE + * + * Support for EXPORT_INDIRECT_CALLABLE was added in upstream commit + * 0053859496ba ("net: add EXPORT_INDIRECT_CALLABLE wrapper") + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f) +#else +#define EXPORT_INDIRECT_CALLABLE(f) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_EXPORT_INDIRECT_CALLABLE */ + +/* NEED_DEVM_KASPRINTF and NEED_DEVM_KVASPRINTF + * + * devm_kvasprintf and devm_kasprintf were added by commit + * 75f2a4ead5d5 ("devres: Add devm_kasprintf and devm_kvasprintf API") + * in Linux 3.17. 
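+ *
+ * Typical use (illustrative; the format string is made up):
+ *
+ *     name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%u", dev_name(dev), idx);
+ *     if (!name)
+ *             return -ENOMEM;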
+ */ +#ifdef NEED_DEVM_KVASPRINTF +__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, + const char *fmt, va_list ap); +#endif /* NEED_DEVM_KVASPRINTF */ + +#ifdef NEED_DEVM_KASPRINTF +__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...); +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif + +#ifdef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#endif /* NEED_XSK_BUFF_DMA_SYNC_FOR_CPU */ + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#endif /* NEED_XSK_BUFF_POOL_RENAME */ + +#ifdef NEED_PCI_IOV_VF_ID +/* NEED_PCI_IOV_VF_ID + * + * pci_iov_vf_id were added by commit 21ca9fb62d468 ("PCI/IOV: + * Add pci_iov_vf_id() to get VF index") in Linux 5.18 + */ +int _kc_pci_iov_vf_id(struct pci_dev *dev); +#define pci_iov_vf_id _kc_pci_iov_vf_id +#endif /* NEED_PCI_IOV_VF_ID */ + +/* NEED_MUL_U64_U64_DIV_U64 + * + * mul_u64_u64_div_u64 was introduced in Linux 5.9 as part of commit + * 3dc167ba5729 ("sched/cputime: Improve cputime_adjust()") + */ +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); +#endif /* NEED_MUL_U64_U64_DIV_U64 */ + +#ifndef HAVE_LINKMODE +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} +#endif /* !HAVE_LINKMODE */ + +#ifndef ETHTOOL_GLINKSETTINGS +/* Link mode bit indices */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + 
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + __ETHTOOL_LINK_MODE_LAST + = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, +}; +#endif /* !ETHTOOL_GLINKSETTINGS */ + +#if defined(NEED_FLOW_MATCH) && defined(HAVE_TC_SETUP_CLSFLOWER) +/* NEED_FLOW_MATCH + * + * flow_match*, FLOW_DISSECTOR_MATCH, flow_rule*, flow_rule_match_key, and + * tc_cls_flower_offload_flow_rule were added by commit + * 8f2566225ae2 ("flow_offload: add flow_rule and flow_match structures and use + * them") in Linux 5.1. + */ + +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +struct flow_match_ip { + struct flow_dissector_key_ip *key, *mask; +}; +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_ENC */ + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +static inline void +flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +static inline void +flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +static inline void +flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +static inline void +flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +static inline void +flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +static inline void +flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +static inline void +flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +static inline void +flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void +flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, 
FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} +#endif /* HAVE_TC_FLOWER_ENC */ +#endif /* NEED_FLOW_MATCH && HAVE_TC_SETUP_CLSFLOWER */ + +/* bitfield / bitmap */ + +/* NEED_BITMAP_COPY_CLEAR_TAIL + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_COPY_CLEAR_TAIL +/* Copy bitmap and clear tail bits in last word */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* NEED_BITMAP_COPY_CLEAR_TAIL */ + +/* NEED_BITMAP_FROM_ARR32 + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_FROM_ARR32 +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +static inline void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#else /* BITS_PER_LONG == 64 */ +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_BITMAP_FROM_ARR32 */ + +/* NEED_BITMAP_TO_ARR32 + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_TO_ARR32 +#if BITS_PER_LONG == 64 +/** + * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits + * @buf: array of u32 (in host byte order), the dest bitmap + * @bitmap: array of unsigned longs, the source bitmap + * @nbits: number of bits in @bitmap + */ +static inline void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + buf[i] = (u32) (bitmap[i/2] & UINT_MAX); + if (++i < halfwords) + buf[i] = (u32) (bitmap[i/2] >> 32); + } + + /* Clear tail bits in last element of array beyond nbits. */ + if (nbits % BITS_PER_LONG) + buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); +} +#else +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
+ */ +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (buf), \ + (const unsigned long *) (bitmap), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_BITMAP_TO_ARR32 */ + +#ifndef HAVE_INCLUDE_BITFIELD +/* linux/bitfield.h has been added in Linux 4.9 in upstream commit + * 3e9b3112ec74 ("add basic register-field manipulation macros") + */ +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) +#endif /* HAVE_INCLUDE_BITFIELD */ + +#ifdef NEED_BITFIELD_FIELD_MAX +/* linux/bitfield.h has FIELD_MAX added to it in Linux 5.7 in upstream + * commit e31a50162feb ("bitfield.h: add FIELD_MAX() and field_max()") + */ +/** + * FIELD_MAX() - produce the maximum value representable by a field + * @_mask: shifted mask defining the field's length and position + * + * FIELD_MAX() returns the maximum value that can be held in the field + * specified by @_mask. + */ +#define FIELD_MAX(_mask) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ + (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ + }) +#endif /* HAVE_BITFIELD_FIELD_MAX */ + +#ifdef NEED_BITFIELD_FIELD_FIT +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. 
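+ *
+ * Worked example (illustrative): for a field defined as GENMASK(7, 4) the
+ * field is 4 bits wide, so FIELD_FIT(GENMASK(7, 4), 9) evaluates to true
+ * while FIELD_FIT(GENMASK(7, 4), 16) evaluates to false.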
+ */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) +#endif /* NEED_BITFIELD_FIELD_FIT */ + +#ifdef NEED_BITFIELD_FIELD_MASK +/** + * linux/bitfield.h has field_mask() along with *_encode_bits() in 4.16: + * 00b0c9b82663 ("Add primitives for manipulating bitfields both in host and fixed-endian.") + * + */ +extern void __compiletime_error("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP +#endif + +#ifdef NEED_BUILD_BUG_ON +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* NEED_BUILD_BUG_ON */ + +#ifdef NEED_IN_TASK +#define in_hardirq() (hardirq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#define in_task() (!(in_nmi() | in_hardirq() | \ + in_serving_softirq())) +#endif /* NEED_IN_TASK */ + +/* + * NEED_NETIF_NAPI_ADD_NO_WEIGHT + * + * Upstream commit b48b89f9c189 ("net: drop the weight argument from + * netif_napi_add") removes weight argument from function call. + * + * Our drivers always used default weight, which is 64. + * + * Define NEED_NETIF_NAPI_ADD_NO_WEIGHT on kernels 3.10+ to use old + * implementation. Undef for 6.1+ where new function was introduced. + * RedHat 9.2 required using no weight parameter option. 
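+ *
+ * Driver code then always uses the three-argument form, e.g. (sketch;
+ * my_napi_poll is a placeholder):
+ *
+ *     netif_napi_add(netdev, &q_vector->napi, my_napi_poll);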
+ */ +#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT +static inline void +_kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int)) +{ + return netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +/* RHEL7 complains about redefines. Undef first, then define compat wrapper */ +#ifdef netif_napi_add +#undef netif_napi_add +#endif +#define netif_napi_add _kc_netif_napi_add +#endif /* NEED_NETIF_NAPI_ADD_NO_WEIGHT */ + +/* + * NEED_ETHTOOL_SPRINTF + * + * Upstream commit 7888fe53b706 ("ethtool: Add common function for filling out + * strings") introduced ethtool_sprintf, which landed in Linux v5.13 + * + * The function implementation is moved to kcompat.c since the compiler + * complains it can never be inlined for the function with variable argument + * lists. + */ +#ifdef NEED_ETHTOOL_SPRINTF +__printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); +#endif /* NEED_ETHTOOL_SPRINTF */ + +/* + * NEED_SYSFS_MATCH_STRING + * + * Upstream commit e1fe7b6a7b37 ("lib/string: add sysfs_match_string helper") + * introduced a helper for looking up strings in an array - it's pure algo stuff + * that is easy to backport if needed. + * Instead of covering sysfs_streq() by yet another flag just copy it. + */ +#ifdef NEED_SYSFS_MATCH_STRING +/* + * sysfs_streq - return true if strings are equal, modulo trailing newline + * @s1: one string + * @s2: another string + * + * This routine returns true iff two strings are equal, treating both + * NUL and newline-then-NUL as equivalent string terminations. It's + * geared for use with sysfs input strings, which generally terminate + * with newlines but are compared against values without newlines. + */ +static inline bool _kc_sysfs_streq(const char *s1, const char *s2) +{ + while (*s1 && *s1 == *s2) { + s1++; + s2++; + } + + if (*s1 == *s2) + return true; + if (!*s1 && *s2 == '\n' && !s2[1]) + return true; + if (*s1 == '\n' && !s1[1] && !*s2) + return true; + return false; +} + +/* + * __sysfs_match_string - matches given string in an array + * @array: array of strings + * @n: number of strings in the array or -1 for NULL terminated arrays + * @str: string to match with + * + * Returns index of @str in the @array or -EINVAL, just like match_string(). + * Uses sysfs_streq instead of strcmp for matching. + * + * This routine will look for a string in an array of strings up to the + * n-th element in the array or until the first NULL element. + * + * Historically the value of -1 for @n, was used to search in arrays that + * are NULL terminated. However, the function does not make a distinction + * when finishing the search: either @n elements have been compared OR + * the first NULL element was found. + */ +static inline int _kc___sysfs_match_string(const char * const *array, size_t n, + const char *str) +{ + const char *item; + int index; + + for (index = 0; index < n; index++) { + item = array[index]; + if (!item) + break; + if (sysfs_streq(item, str)) + return index; + } + + return -EINVAL; +} + +#define sysfs_match_string(_a, _s) \ + _kc___sysfs_match_string(_a, ARRAY_SIZE(_a), _s) + +#endif /* NEED_SYSFS_MATCH_STRING */ + +/* + * NEED_SYSFS_EMIT + * + * Upstream introduced following function in + * commit 2efc459d06f1 ("sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output") + * + * The function implementation is moved to kcompat.c since the compiler + * complains it can never be inlined for the function with variable argument + * lists. 
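+ *
+ * Intended use in a show() callback, unchanged across kernels (illustrative;
+ * foo_value is a placeholder):
+ *
+ *     static ssize_t foo_show(struct device *dev,
+ *                             struct device_attribute *attr, char *buf)
+ *     {
+ *             return sysfs_emit(buf, "%u\n", foo_value);
+ *     }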
+ */ +#ifdef NEED_SYSFS_EMIT +__printf(2, 3) int sysfs_emit(char *buf, const char *fmt, ...); +#endif /* NEED_SYSFS_EMIT */ + +/* + * HAVE_U64_STATS_FETCH_BEGIN_IRQ + * HAVE_U64_STATS_FETCH_RETRY_IRQ + * + * Upstream commit 44b0c2957adc ("u64_stats: Streamline the implementation") + * marks u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() + * as obsolete. Their functionality is combined with: u64_stats_fetch_begin() + * and u64_stats_fetch_retry(). + * + * Upstream commit dec5efcffad4 ("u64_stat: Remove the obsolete fetch_irq() + * variants.") removes u64_stats_fetch_begin_irq() and + * u64_stats_fetch_retry_irq(). + * + * Map u64_stats_fetch_begin() and u64_stats_fetch_retry() to the _irq() + * variants on the older kernels to allow the same driver code working on + * both old and new kernels. + */ +#ifdef HAVE_U64_STATS_FETCH_BEGIN_IRQ +#define u64_stats_fetch_begin _kc_u64_stats_fetch_begin + +static inline unsigned int +_kc_u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ + return u64_stats_fetch_begin_irq(syncp); +} +#endif /* HAVE_U64_STATS_FETCH_BEGIN_IRQ */ + +#ifdef HAVE_U64_STATS_FETCH_RETRY_IRQ +#define u64_stats_fetch_retry _kc_u64_stats_fetch_retry + +static inline bool +_kc_u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ + return u64_stats_fetch_retry_irq(syncp, start); +} +#endif /* HAVE_U64_STATS_FETCH_RETRY_IRQ */ + +/* + * NEED_U64_STATS_READ + * NEED_U64_STATS_SET + * + * Upstream commit 316580b69d0 ("u64_stats: provide u64_stats_t type") + * introduces the u64_stats_t data type and other helper APIs to read, + * add and increment the stats, in Linux v5.5. Support them on older kernels + * as well. + * + * Upstream commit f2efdb179289 ("u64_stats: Introduce u64_stats_set()") + * introduces u64_stats_set API to set the u64_stats_t variable with the + * value provided, in Linux v5.16. Add support for older kernels. + */ +#ifdef NEED_U64_STATS_READ +#if BITS_PER_LONG == 64 +#include + +typedef struct { + local64_t v; +} u64_stats_t; + +static inline u64 u64_stats_read(u64_stats_t *p) +{ + return local64_read(&p->v); +} + +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) +{ + local64_add(val, &p->v); +} + +static inline void u64_stats_inc(u64_stats_t *p) +{ + local64_inc(&p->v); +} +#else +typedef struct { + u64 v; +} u64_stats_t; + +static inline u64 u64_stats_read(u64_stats_t *p) +{ + return p->v; +} + +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) +{ + p->v += val; +} + +static inline void u64_stats_inc(u64_stats_t *p) +{ + p->v++; +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_U64_STATS_READ */ + +#ifdef NEED_U64_STATS_SET +#if BITS_PER_LONG == 64 +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + local64_set(&p->v, val); +} +#else +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + p->v = val; +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_U64_STATS_SET */ + +/* + * NEED_DEVM_KFREE + * NEED_DEVM_KZALLOC + * + * Upstream commit 9ac7849e35f7 ("devres: device resource management") + * Implement device resource management to allocate and free the resource + * for driver + */ +#ifdef NEED_DEVM_KFREE +#define devm_kfree(dev, p) kfree(p) +#else +/* Since commit 0571967dfb5d ("devres: constify p in devm_kfree()") the + * devm_kfree function has accepted a const void * parameter. Since commit + * cad064f1bd52 ("devres: handle zero size in devm_kmalloc()"), it has also + * accepted a NULL pointer safely. 
However, the null pointer acceptance is in + * devres.c and thus cannot be checked by kcompat-generator.sh. To handle + * this, unconditionally replace devm_kfree with a variant that both accepts + * a const void * pointer and handles a NULL value correctly. + */ +static inline void _kc_devm_kfree(struct device *dev, const void *p) +{ + if (p) + devm_kfree(dev, (void *)p); +} +#define devm_kfree _kc_devm_kfree +#endif /* NEED_DEVM_KFREE */ + +#ifdef NEED_DEVM_KZALLOC +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#endif /* NEED_DEVM_KZALLOC */ + +/* NEED_DIFF_BY_SCALED_PPM + * + * diff_by_scaled_ppm and adjust_by_scaled_ppm were introduced in + * kernel 6.1 by upstream commit 1060707e3809 ("ptp: introduce helpers + * to adjust by scaled parts per million"). + */ +#ifdef NEED_DIFF_BY_SCALED_PPM +static inline bool +diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff) +{ + bool negative = false; + + if (scaled_ppm < 0) { + negative = true; + scaled_ppm = -scaled_ppm; + } + + *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, + 1000000ULL << 16); + + return negative; +} + +static inline u64 +adjust_by_scaled_ppm(u64 base, long scaled_ppm) +{ + u64 diff; + + if (diff_by_scaled_ppm(base, scaled_ppm, &diff)) + return base - diff; + + return base + diff; +} +#endif /* NEED_DIFF_BY_SCALED_PPM */ + +#ifndef HAVE_PCI_MSIX_CAN_ALLOC_DYN +static inline bool pci_msix_can_alloc_dyn(struct pci_dev __always_unused *dev) +{ + return false; +} +#endif /* !HAVE_PCI_MSIX_CAN_ALLOC_DYN */ + +#if !defined(HAVE_PCI_MSIX_ALLOC_IRQ_AT) && !defined(HAVE_PCI_MSIX_FREE_IRQ) +struct msi_map { + int index; + int virq; +}; +#endif /* !HAVE_PCI_MSIX_ALLOC_IRQ_AT && !HAVE_PCI_MSIX_FREE_IRQ */ + +#ifndef HAVE_PCI_MSIX_ALLOC_IRQ_AT +#define MSI_ANY_INDEX UINT_MAX +struct irq_affinity_desc; + +static inline struct msi_map +pci_msix_alloc_irq_at(struct pci_dev __always_unused *dev, + unsigned int __always_unused index, + const struct irq_affinity_desc __always_unused *affdesc) +{ + struct msi_map map = { .index = -ENOTSUPP }; + return map; +} +#endif /* !HAVE_PCI_MSIX_ALLOC_IRQ_AT */ + +#ifndef HAVE_PCI_MSIX_FREE_IRQ +static inline void +pci_msix_free_irq(struct pci_dev __always_unused *dev, + struct msi_map __always_unused map) +{ +} +#endif /* !HAVE_PCI_MSIX_FREE_IRQ */ + +#ifdef NEED_PCIE_PTM_ENABLED +/* NEED_PCIE_PTM_ENABLED + * + * pcie_ptm_enabled was added by upstream commit 014408cd624e + * ("PCI: Add pcie_ptm_enabled()"). + * + * It is easy to implement directly. 
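+ *
+ * Illustrative caller (foo_enable_ptm_timestamping is hypothetical): only
+ * set up PTM-based timestamping when the link actually negotiated PTM:
+ *
+ *	if (pcie_ptm_enabled(pf->pdev))
+ *		foo_enable_ptm_timestamping(pf);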
+ */ +static inline bool pcie_ptm_enabled(struct pci_dev *dev) +{ +#if defined(HAVE_STRUCT_PCI_DEV_PTM_ENABLED) && defined(CONFIG_PCIE_PTM) + if (!dev) + return false; + + return dev->ptm_enabled; +#else /* !HAVE_STRUCT_PCI_DEV_PTM_ENABLED || !CONFIG_PCIE_PTM */ + return false; +#endif /* HAVE_STRUCT_PCI_DEV_PTM_ENBED && CONFIG_PCIE_PTM */ +} +#endif /* NEED_PCIE_PTM_ENABLED */ + +/* NEED_PCI_ENABLE_PTM + * + * commit ac6c26da29c1 made this function private + * commit 1d71eb53e451 made this function public again + * This declares/defines the function for kernels missing it in linux/pci.h + */ +#ifdef NEED_PCI_ENABLE_PTM +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +#endif /* CONFIG_PCIE_PTM */ +#endif /* NEED_PCI_ENABLE_PTM */ + +/* NEED_PCIE_FLR + * NEED_PCIE_FLR_RETVAL + * + * pcie_flr() was added in the past, but wasn't generally available until 4.12 + * commit a60a2b73ba69 (4.12) made this function available as an extern + * commit 91295d79d658 (4.17) made this function return int instead of void + + * This declares/defines the function for kernels missing it or needing a + * retval in linux/pci.h + */ +#ifdef NEED_PCIE_FLR +static inline int pcie_flr(struct pci_dev *dev) +{ + u32 cap; + + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); + + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); + return 0; +} +#endif /* NEED_PCIE_FLR */ +#ifdef NEED_PCIE_FLR_RETVAL +static inline int _kc_pcie_flr(struct pci_dev *dev) +{ + pcie_flr(dev); + return 0; +} +#define pcie_flr(dev) _kc_pcie_flr((dev)) +#endif /* NEED_PCIE_FLR_RETVAL */ + +/* NEED_DEV_PAGE_IS_REUSABLE + * + * dev_page_is_reusable was introduced by + * commit bc38f30f8dbc ("net: introduce common dev_page_is_reusable()") + * + * This function is trivial to re-implement in full. + */ +#ifdef NEED_DEV_PAGE_IS_REUSABLE +static inline bool dev_page_is_reusable(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page)); +} +#endif /* NEED_DEV_PAGE_IS_REUSABLE */ + +/* NEED_NAPI_BUILD_SKB + * + * napi_build_skb was introduced by + * commit f450d539c05a: ("skbuff: introduce {,__}napi_build_skb() which reuses NAPI cache heads") + * + * This function is a more efficient version of build_skb(). + */ +#ifdef NEED_NAPI_BUILD_SKB +static inline +struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) +{ + return build_skb(data, frag_size); +} +#endif /* NEED_NAPI_BUILD_SKB */ + +/* NEED_DEBUGFS_LOOKUP + * + * Old RHELs (7.2-7.4) do not have this backported. Create a stub and always + * return NULL. Should not affect important features workflow and allows the + * driver to compile on older kernels. + */ +#ifdef NEED_DEBUGFS_LOOKUP + +#include + +static inline struct dentry * +debugfs_lookup(const char *name, struct dentry *parent) +{ + return NULL; +} +#endif /* NEED_DEBUGFS_LOOKUP */ + +/* NEED_DEBUGFS_LOOKUP_AND_REMOVE + * + * Upstream commit dec9b2f1e0455("debugfs: add debugfs_lookup_and_remove()") + * + * Should work the same as upstream equivalent. 
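+ *
+ * Illustrative call (names are hypothetical): drop a node created earlier
+ * with debugfs_create_file():
+ *
+ *	debugfs_lookup_and_remove("ring_stats", adapter->debugfs_dir);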
+ */
+#ifdef NEED_DEBUGFS_LOOKUP_AND_REMOVE
+
+#include <linux/debugfs.h>
+
+static inline void
+debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+	struct dentry *dentry;
+
+	dentry = debugfs_lookup(name, parent);
+	if (!dentry)
+		return;
+
+	debugfs_remove(dentry);
+	dput(dentry);
+}
+#endif /* NEED_DEBUGFS_LOOKUP_AND_REMOVE */
+
+/* NEED_FS_FILE_DENTRY
+ *
+ * this is a simple implementation of file_dentry() (introduced in v4.6,
+ * backported to stable mainline 4.5 and 4.6 kernels)
+ *
+ * prior to the existence of file_dentry() this logic was open-coded, and if
+ * a given kernel has not backported it, the same "oversimplification bugs"
+ * are present there anyway.
+ */
+#ifdef NEED_FS_FILE_DENTRY
+static inline struct dentry *file_dentry(const struct file *file)
+{
+	return file->f_path.dentry;
+}
+#endif /* NEED_FS_FILE_DENTRY */
+
+/* NEED_CLASS_CREATE_WITH_MODULE_PARAM
+ *
+ * Upstream removed the owner argument from the class_create() helper macro in
+ * 1aaba11da9aa ("remove module * from class_create()")
+ *
+ * In dcfbb67e48a2 ("use lock_class_key already present in struct subsys_private")
+ * the macro was removed completely.
+ *
+ * class_create no longer has an owner/module param as it was not used.
+ */
+#ifdef NEED_CLASS_CREATE_WITH_MODULE_PARAM
+static inline struct class *_kc_class_create(const char *name)
+{
+	return class_create(THIS_MODULE, name);
+}
+#ifdef class_create
+#undef class_create
+#endif
+#define class_create _kc_class_create
+#endif /* NEED_CLASS_CREATE_WITH_MODULE_PARAM */
+
+/* NEED_LOWER_16_BITS and NEED_UPPER_16_BITS
+ *
+ * Upstream commit 03cb4473be92 ("ice: add low level PTP clock access
+ * functions") introduced the lower_16_bits() and upper_16_bits() macros. They
+ * are straightforward to implement if missing.
+ */
+#ifdef NEED_LOWER_16_BITS
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
+#endif /* NEED_LOWER_16_BITS */
+
+#ifdef NEED_UPPER_16_BITS
+#define upper_16_bits(n) ((u16)((n) >> 16))
+#endif /* NEED_UPPER_16_BITS */
+
+#ifdef NEED_HWMON_CHANNEL_INFO
+#define HWMON_CHANNEL_INFO(stype, ...)	\
+	(&(struct hwmon_channel_info) {	\
+		.type = hwmon_##stype,	\
+		.config = (u32 []) {	\
+			__VA_ARGS__, 0	\
+		}	\
+	})
+#endif /* NEED_HWMON_CHANNEL_INFO */
+
+/* NEED_ASSIGN_BIT
+ *
+ * Upstream commit 5307e2ad69ab ("bitops: Introduce assign_bit()") added the
+ * assign_bit() helper to replace open-coded if checks for setting/clearing
+ * bits.
+ */
+#ifdef NEED_ASSIGN_BIT
+static inline void assign_bit(long nr, unsigned long *addr, bool value)
+{
+	if (value)
+		set_bit(nr, addr);
+	else
+		clear_bit(nr, addr);
+}
+#endif /* NEED_ASSIGN_BIT */
+
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+/* NEED___STRUCT_SIZE
+ *
+ * 9f7d69c5cd23 ("fortify: Convert to struct vs member helpers") of kernel v6.2
+ * added the following two macros, one of which is used by DEFINE_FLEX()
+ */
+#ifdef NEED___STRUCT_SIZE
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
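+ *
+ * Illustrative example: given "char buf[16];", __struct_size(buf) below
+ * evaluates to 16 with either builtin; __member_size() uses mode 1 of the
+ * same builtins, which stops at the bounds of the enclosing member.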
+ */ +#if __has_builtin(__builtin_dynamic_object_size) +#define __struct_size(p) __builtin_dynamic_object_size(p, 0) +#define __member_size(p) __builtin_dynamic_object_size(p, 1) +#else +#define __struct_size(p) __builtin_object_size(p, 0) +#define __member_size(p) __builtin_object_size(p, 1) +#endif +#endif /* NEED___STRUCT_SIZE */ + +/* NEED_KREALLOC_ARRAY + * + * krealloc_array was added by upstream commit + * f0dbd2bd1c22 ("mm: slab: provide krealloc_array()"). + * + * For older kernels, add a new API wrapper around krealloc(). + */ +#ifdef NEED_KREALLOC_ARRAY +static inline void *__must_check krealloc_array(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) + return NULL; + + return krealloc(p, bytes, flags); +} +#endif /* NEED_KREALLOC_ARRAY */ + +/* NEED_XDP_DO_FLUSH + * + * Upstream commit 1d233886dd90 ("xdp: Use bulking for non-map XDP_REDIRECT + * and consolidate code paths") replaced xdp_do_flush_map with xdp_do_flush + * and 7f04bd109d4c ("net: Tree wide: Replace xdp_do_flush_map() with + * xdp_do_flush()") cleaned up related code. + */ +#ifdef NEED_XDP_DO_FLUSH +static inline void xdp_do_flush(void) +{ + xdp_do_flush_map(); +} +#endif /* NEED_XDP_DO_FLUSH */ + +#ifdef NEED_XDP_FEATURES +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + + NETDEV_XDP_ACT_MASK = 127, +}; + +typedef u32 xdp_features_t; + +static inline void +xdp_set_features_flag(struct net_device *dev, xdp_features_t val) +{ +} + +static inline void xdp_clear_features_flag(struct net_device *dev) +{ +} + +static inline void +xdp_features_set_redirect_target(struct net_device *dev, bool support_sg) +{ +} + +static inline void xdp_features_clear_redirect_target(struct net_device *dev) +{ +} +#endif /* NEED_XDP_FEATURES */ + +#ifdef NEED_FIND_NEXT_BIT_WRAP +/* NEED_FIND_NEXT_BIT_WRAP + * + * The find_next_bit_wrap function was added by commit 6cc18331a987 + * ("lib/find_bit: add find_next{,_and}_bit_wrap") + * + * For older kernels, define find_next_bit_wrap function that calls + * find_next_bit function and find_first_bit macro. + */ +static inline +unsigned long find_next_bit_wrap(const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + unsigned long bit = find_next_bit(addr, size, offset); + + if (bit < size) + return bit; + + bit = find_first_bit(addr, offset); + return bit < offset ? bit : size; +} +#endif /* NEED_FIND_NEXT_BIT_WRAP */ + +#ifdef NEED_IS_CONSTEXPR +/* __is_constexpr() macro has moved acros 3 upstream kernel headers: + * commit 3c8ba0d61d04 ("kernel.h: Retain constant expression output for max()/min()") + * introduced it in kernel.h, for kernel v4.17 + * commit b296a6d53339 ("kernel.h: split out min()/max() et al. helpers") moved + * it to minmax.h; + * commit f747e6667ebb ("linux/bits.h: fix compilation error with GENMASK") + * moved it to its current location of const.h + */ +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(x) * 0l)) : (int *)8)))
+#endif /* NEED_IS_CONSTEXPR */
+
+/* NEED_DECLARE_FLEX_ARRAY
+ *
+ * Upstream commit 3080ea5553 ("stddef: Introduce DECLARE_FLEX_ARRAY() helper")
+ * introduces DECLARE_FLEX_ARRAY to support flexible arrays in unions or
+ * alone in a structure.
+ */
+#ifdef NEED_DECLARE_FLEX_ARRAY
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+	struct { \
+		struct { } __empty_ ## NAME; \
+		TYPE NAME[]; \
+	}
+#endif /* NEED_DECLARE_FLEX_ARRAY */
+
+#ifdef NEED_LIST_COUNT_NODES
+/* list_count_nodes was added as part of the list.h API by commit 4d70c74659d9
+ * ("i915: Move list_count() to list.h as list_count_nodes() for broader use"),
+ * which landed in Linux v6.3.
+ *
+ * It's straightforward to implement the basic loop directly.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+	struct list_head *pos;
+	size_t count = 0;
+
+	list_for_each(pos, head)
+		count++;
+
+	return count;
+}
+#endif /* NEED_LIST_COUNT_NODES */
+#ifdef NEED_STATIC_ASSERT
+/*
+ * NEED_STATIC_ASSERT
+ *
+ * Introduced with upstream commit 6bab69c6501
+ * ("build_bug.h: add wrapper for _Static_assert"), available in kernels >= 5.1.
+ *
+ * Macro wrapper for the _Static_assert C11 keyword.
+ */
+#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#endif /* NEED_STATIC_ASSERT */
+
+#ifdef NEED_ETH_TYPE_VLAN
+#include <linux/if_vlan.h>
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * eth_type_vlan was added in commit fe19c4f971a5 ("lan: Check for vlan ethernet
+ * types for 8021.q or 802.1ad").
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+	switch (ethertype) {
+	case htons(ETH_P_8021Q):
+	case htons(ETH_P_8021AD):
+		return true;
+	default:
+		return false;
+	}
+}
+#endif /* NEED_ETH_TYPE_VLAN */
+
+#ifdef NEED___STRUCT_GROUP
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for later reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+	union { \
+		struct { MEMBERS } ATTRS; \
+		struct TAG { MEMBERS } ATTRS NAME; \
+	}
+#endif /* NEED___STRUCT_GROUP */
+
+#ifdef NEED_STRUCT_GROUP
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...)	\
+	__struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+	__struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+#endif /* NEED_STRUCT_GROUP */
+
+#ifdef NEED_READ_POLL_TIMEOUT
+/*
+ * 5f5323a14cad ("iopoll: introduce read_poll_timeout macro")
+ * Added in kernel 5.8
+ */
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+			  sleep_before_read, args...) \
+({ \
+	u64 __timeout_us = (timeout_us); \
+	unsigned long __sleep_us = (sleep_us); \
+	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+	might_sleep_if((__sleep_us) != 0); \
+	if (sleep_before_read && __sleep_us) \
+		usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+	for (;;) { \
+		(val) = op(args); \
+		if (cond) \
+			break; \
+		if (__timeout_us && \
+		    ktime_compare(ktime_get(), __timeout) > 0) { \
+			(val) = op(args); \
+			break; \
+		} \
+		if (__sleep_us) \
+			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+		cpu_relax(); \
+	} \
+	(cond) ? 0 : -ETIMEDOUT; \
+})
+#else
+#include <linux/iopoll.h>
+#endif /* NEED_READ_POLL_TIMEOUT */
+
+#ifndef HAVE_DPLL_LOCK_STATUS_ERROR
+/* Copied from include/uapi/linux/dpll.h to have common dpll status enums
+ * between sysfs and dpll subsystem based solutions.
+ * cf4f0f1e1c465 ("dpll: extend uapi by lock status error attribute") + * Added in kernel 6.9 + */ +enum dpll_lock_status_error { + DPLL_LOCK_STATUS_ERROR_NONE = 1, + DPLL_LOCK_STATUS_ERROR_UNDEFINED, + DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN, + DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH, + + /* private: */ + __DPLL_LOCK_STATUS_ERROR_MAX, + DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1) +}; + +#endif /* HAVE_DPLL_LOCK_STATUS_ERROR */ + +#ifndef NEED_DPLL_NETDEV_PIN_SET +#define netdev_dpll_pin_set dpll_netdev_pin_set +#define netdev_dpll_pin_clear dpll_netdev_pin_clear +#endif /* HAVE_DPLL_NETDEV_PIN_SET */ + +#ifdef NEED_RADIX_TREE_EMPTY +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return !root->rnode; +} +#endif /* NEED_RADIX_TREE_EMPTY */ + +#ifdef NEED_SET_SCHED_FIFO +/* + * 7318d4cc14c8 ("sched: Provide sched_set_fifo()") + * Added in kernel 5.9, + * converted to a macro for kcompat + */ +#include + +#ifdef NEED_SCHED_PARAM +#include +#endif /* NEED_SCHED_PARAM */ +#ifdef NEED_RT_H +#include +#else/* NEED_RT_H */ +#include +#endif /* NEED_RT_H */ +#define sched_set_fifo(p) \ +({ \ + struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; \ + \ + WARN_ON_ONCE(sched_setscheduler_nocheck((p), SCHED_FIFO,&sp) != 0);\ +}) +#endif /* NEED_SET_SCHED_FIFO */ + +#endif /* _KCOMPAT_IMPL_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_overflow.h b/drivers/net/ethernet/guangruntong/kcompat_overflow.h new file mode 100755 index 00000000000000..dc89d338af97a9 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro. 
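+ *
+ * For illustration, with the definitions below:
+ *
+ *	type_max(u8) == 255, type_min(u8) == 0,
+ *	type_max(s8) == 127, type_min(s8) == -128.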
+ */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
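+ *
+ * Worked example (illustrative, s8): (-100) - 100 wraps to 56; the operands
+ * have opposite signs and the result's sign differs from that of a, so the
+ * check below reports overflow.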
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. 
+ * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h b/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h new file mode 100755 index 00000000000000..0d981f0ef77ace --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_RHEL_DEFS_H_ +#define _KCOMPAT_RHEL_DEFS_H_ + +/* This is the RedHat Enterprise Linux distribution specific definitions file. + * It defines what features need backports for a given version of the RHEL + * kernel. + * + * It checks the RHEL_RELEASE_CODE and RHEL_RELEASE_VERSION macros to decide + * what support the target kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * RHEL. 
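+ *
+ * For example (mirroring the 8.1 block below), a check of the form
+ *
+ *	#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1))
+ *	#define NEED_FLOW_MATCH
+ *	#else
+ *	#undef NEED_FLOW_MATCH
+ *	#endif
+ *
+ * keeps the NEED_ backport flag on RHEL kernels older than 8.1 and drops it
+ * where Red Hat already carries the corresponding API.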
+ */ + +#if !RHEL_RELEASE_CODE +#error "RHEL_RELEASE_CODE is 0 or undefined" +#endif + +#ifndef RHEL_RELEASE_VERSION +#error "RHEL_RELEASE_VERSION is undefined" +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)) +#define NEED_NETDEV_TXQ_BQL_PREFETCH +#else /* >= 7.3 */ +#undef NEED_DEV_PRINTK_ONCE +#undef NEED_DEVM_KASPRINTF +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 7.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +#define NEED_BUILD_BUG_ON +#else /* >= 7.4 */ +#define HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_INCLUDE_BITFIELD +#endif /* 7.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#else /* >= 7.5 */ +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_FLOW_DISSECTOR_KEY_IP +#endif /* 7.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#else /* >= 7.6 */ +#undef NEED_JIFFIES_64_TIME_IS_MACROS +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#undef NEED_TC_SETUP_QDISC_MQPRIO +#endif /* 7.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) +#else /* >= 7.7 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_IN_TASK +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP +#endif /* 7.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,8)) +#else /* >= 7.8 */ +#endif /* 7.8 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,9)) +#else /* >= 7.9 */ +/* mul_u64_u64_div_u64 was backported into RHEL 7.9 but not into the early + * 8.x releases + */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* < 8.0 */ +#endif /* 7.9 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#else /* >= 8.0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#undef HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_NDO_OFFLOAD_STATS +#undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +/* 7.7 undefs it due to a backport in 7.7+, but 8.0 needs it still */ +#define NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 8.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define NEED_FLOW_MATCH +#else /* >= 8.1 */ +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#undef NEED_FLOW_MATCH +#define HAVE_DEVLINK_PARAMS_PUBLISH +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#define HAVE_GRETAP_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#endif /* 8.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(8,2)) +#else /* >= 8.2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_DEVLINK_HEALTH +#define HAVE_NETDEV_SB_DEV +#endif /* 8.2 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)) +#else /* >= 8.3 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* 8.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,4)) +#else /* >= 8.4 */ +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#undef NEED_NET_PREFETCH +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#undef HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NDO_XSK_WAKEUP +#define XSK_UMEM_RETURNS_XDP_DESC +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#define HAVE_PTP_FIND_PIN_UNLOCKED +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5)) +#else /* >= 8.5 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#undef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#undef HAVE_NAPI_BUSY_LOOP +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NO_XDP_QUERY_XSK_UMEM +#undef NEED_XSK_BUFF_POOL_RENAME +#define HAVE_NETDEV_BPF_XSK_POOL +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_DEVLINK_OPS_CREATE_DEL +#undef NEED_ETHTOOL_SPRINTF +#endif /* 8.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_DEVL_PORT_REGISTER +#endif /* < 9.0 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,7)) +#else /* >= 8.7 */ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_REGISTER_SETS_DEV +#define HAVE_DEVLINK_NOTIFY_REGISTER +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 8.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_NDO_ETH_IOCTL +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#endif /* 9.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,1)) +#else /* 
>= 9.1 */ +#undef HAVE_PASID_SUPPORT +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_XDP_DO_FLUSH +#endif /* 9.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,2)) +#else /* >= 9.2 */ +#undef NEED_NETIF_NAPI_ADD_NO_WEIGHT +#endif /* 9.2 */ + +#endif /* _KCOMPAT_RHEL_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h b/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h new file mode 100755 index 00000000000000..f7de700d4a6a3b --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_SLES_DEFS_H_ +#define _KCOMPAT_SLES_DEFS_H_ + +/* This is the SUSE Linux Enterprise distribution specific definitions file. + * It defines what features need backports for a given version of the SUSE + * Linux Enterprise kernel. + * + * It checks a combination of the LINUX_VERSION code and the + * SLE_LOCALVERSION_CODE to determine what support the kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * SUSE. + */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +#if !SLE_KERNEL_REVISION +#error "SLE_KERNEL_REVISION is 0 or undefined" +#endif + +#if SLE_KERNEL_REVISION > 65535 +#error "SLE_KERNEL_REVISION is unexpectedly large" +#endif + +/* SLE kernel versions are a combination of the LINUX_VERSION_CODE along with + * an extra digit that indicates the SUSE specific revision of that kernel. + * This value is found in the CONFIG_LOCALVERSION of the SUSE kernel, which is + * extracted by common.mk and placed into SLE_KERNEL_REVISION_CODE. + * + * We combine the value of SLE_KERNEL_REVISION along with the LINUX_VERSION code + * to generate the useful value that determines what specific kernel we're + * dealing with. + * + * Just in case the SLE_KERNEL_REVISION ever goes above 255, we reserve 16 bits + * instead of 8 for this value. + */ +#define SLE_KERNEL_CODE ((LINUX_VERSION_CODE << 16) + SLE_KERNEL_REVISION) +#define SLE_KERNEL_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,c) << 16) + (d)) + +/* Unlike RHEL, SUSE kernels are not always tied to a single service pack. For + * example, 4.12.14 was used as the base for SLE 15 SP1, SLE 12 SP4, and SLE 12 + * SP5. + * + * You can find the patches that SUSE applied to the kernel tree at + * https://github.com/SUSE/kernel-source. + * + * You can find the correct kernel version for a check by using steps similar + * to the following + * + * 1) download the kernel-source repo + * 2) checkout the relevant branch, i.e SLE15-SP3 + * 3) find the relevant backport you're interested in the patches.suse + * directory + * 4) git log to locate the commit that introduced the backport + * 5) git describe --contains to find the relevant tag that includes that + * commit, i.e. rpm-5.3.18-37 + * 6) those digits represent the SLE kernel that introduced that backport. + * + * Try to keep the checks in SLE_KERNEL_CODE order and condense where + * possible. 
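+ *
+ * Worked example (illustrative): the rpm-5.3.18-37 tag above corresponds to
+ *
+ *	SLE_KERNEL_CODE == (KERNEL_VERSION(5,3,18) << 16) + 37
+ *
+ * so any check against SLE_KERNEL_VERSION(5,3,18,d) with d <= 37 is
+ * satisfied on that kernel, and later SUSE revisions only grow the set of
+ * backports that apply.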
+ */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,4,0,73)) +#else /* >= 4.4.0-73 */ +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 4.4.0-73 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE > SLE_KERNEL_VERSION(4,12,14,23) && \ + SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,94)) +/* + * 4.12.14 is used as the base for SLE 12 SP4, SLE 12 SP5, SLE 15, and SLE 15 + * SP1. Unfortunately the revision codes do not line up cleanly. SLE 15 + * launched with 4.12.14-23. It appears that SLE 12 SP4 and SLE 15 SP1 both + * diverged from this point, with SLE 12 SP4 kernels starting around + * 4.12.14-94. A few backports for SLE 15 SP1 landed in some alpha and beta + * kernels tagged between 4.12.14-25 up to 4.12.14-32. These changes did not + * make it into SLE 12 SP4. This was cleaned up with SLE 12 SP5 by an apparent + * merge in 4.12.14-111. The official launch of SLE 15 SP1 ended up with + * version 4.12.14-195. + * + * Because of this inconsistency and because all of these kernels appear to be + * alpha or beta kernel releases for SLE 15 SP1, we do not rely on version + * checks between this range. Issue a warning to indicate that we do not + * support these. + */ +#warning "SLE kernel versions between 4.12.14-23 and 4.12.14-94 are not supported" +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,10)) +#else /* >= 4.12.14-10 */ +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 4.12.14-10 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,100)) +#else /* >= 4.12.14-100 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#endif /* 4.12.14-100 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,111)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4.12.14-111 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#undef NEED_MACVLAN_ACCEL_PRIV +#undef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#undef NEED_MACVLAN_SUPPORTS_DEST_FILTER +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 4.12.14-111 */ + +/*****************************************************************************/ +/* SLES 12-SP5 base kernel version */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,115)) +#else /* >= 4.12.14-115 */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_MIRRED_DEV +#define HAVE_TCF_BLOCK +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#undef NEED_TC_SETUP_QDISC_MQPRIO +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#undef NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_LINKMODE +#endif /* 4.12.14-115 */ + +/*****************************************************************************/ +/* SLES 15-SP1 base */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,195)) +#else /* >= 4.12.14-195 */ +#define HAVE_DEVLINK_PARAMS +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 4.12.14-195 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,8,2)) +#else /* >= 5.3.8-2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD 
+#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PARAMS_PUBLISH +#endif /* 5.3.8-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,16,2)) +#else /* >= 5.3.16-2 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.3.16-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,24)) +#else /* >= 5.3.18-24 */ +#undef NEED_MUL_U64_U64_DIV_U64 +#endif + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,26)) +#else /* >= 5.3.18-26 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,34)) +#else /* >= 5.3.18-34 */ +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#endif /* 5.3.18-34 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,37)) +#else /* >= 5.3.18-37 */ +#undef NEED_NET_PREFETCH +#endif /* 5.3.18-37 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,38)) +#else /* >= 5.3.18-38 */ +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#endif /* 5.3.18-38 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,41)) +#define NEED_XSK_BUFF_POOL_RENAME +#else /* >= 5.3.18-41 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_NETDEV_BPF_XSK_POOL +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#undef NEED_XSK_BUFF_POOL_RENAME +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.3.18-41 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,59)) +#else /* >= 5.3.18-59 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 5.3.18-59 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 14, 17, 1)) +#else /* >= 5.14.17-150400.1 */ + #undef HAVE_DEVLINK_PARAMS_PUBLISH + #undef HAVE_DEVLINK_REGISTER_SETS_DEV + #define HAVE_DEVLINK_SET_FEATURES + #undef NEED_ETHTOOL_SPRINTF +#endif /* 5.14.17-150400.1 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,14,21,9)) +#else /* >= 5.14.21-150400.9 */ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_DEVLINK_OPS_CREATE_DEL +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 5.14.21-150400.9 */ + +#endif /* _KCOMPAT_SLES_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_std_defs.h b/drivers/net/ethernet/guangruntong/kcompat_std_defs.h new file mode 100644 index 00000000000000..46a92ff953246a --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_std_defs.h @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_STD_DEFS_H_ +#define _KCOMPAT_STD_DEFS_H_ + +/* This file contains the definitions for what kernel features need backports + * for a given kernel. It targets only the standard stable kernel releases. + * It must check only LINUX_VERSION_CODE and assume the kernel is a standard + * release, and not a custom distribution. 
+ * + * It must define HAVE_ and NEED_ for features. It must not + * implement any backports, instead leaving the implementation to the + * kcompat_impl.h header. + * + * If a feature can be easily implemented as a replacement macro or fully + * backported, use a NEED_ to indicate that the feature needs + * a backport. (If NEED_ is undefined, then no backport for that feature + * is needed). + * + * If a feature cannot be easily implemented in kcompat directly, but + * requires drivers to make specific changes such as stripping out an entire + * feature or modifying a function pointer prototype, use a HAVE_. + */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +/*****************************************************************************/ +//#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) //sam +#else /* >= 3,10,0 */ +#define NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define NEED_ETHTOOL_SPRINTF +#endif /* 3,10,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) //sam +#define NEED_DEVM_KASPRINTF +#else /* >= 3,17,0 */ +#endif /* 3,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) //sam +#define NEED_DEV_PM_DOMAIN_ATTACH_DETACH +#else /* >= 3,18,0 */ +#endif /* 3,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) +#define NEED_DEV_PRINTK_ONCE +#else /* >= 3,19,0 */ +#endif /* 3,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#define NEED_DEFINE_STATIC_KEY_FALSE +#define NEED_STATIC_BRANCH +#else /* >= 4,3,0 */ +#define NEED_DECLARE_STATIC_KEY_FALSE +#endif /* 4,3,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#else /* >= 4,6,0 */ +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 4,6,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#else /* >= 4,8,0 */ +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_PCI_ALLOC_IRQ +#define HAVE_NDO_UDP_TUNNEL_CALLBACK +#endif /* 4,8,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#define NEED_JIFFIES_64_TIME_IS_MACROS +#else /* >= 4,9,0 */ +#define HAVE_KTHREAD_DELAYED_API +#define HAVE_NDO_OFFLOAD_STATS +#undef NEED_DECLARE_STATIC_KEY_FALSE +#define HAVE_INCLUDE_BITFIELD +#endif /* 4,9,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,62)) +#ifndef KYLIN_KERNEL44 +#define NEED_IN_TASK +#endif +#else /* >= 4,9,62 */ +#endif /* 4,9,62 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#else /* >= 4,12,0 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4,12,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#else /* >= 4,13,0 */ +#define 
HAVE_FLOW_DISSECTOR_KEY_IP +#endif /* 4,13,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#define NEED_TC_SETUP_QDISC_MQPRIO +#define NEED_NETDEV_XDP_STRUCT +#else /* >= 4,15,0 */ +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_NDO_BPF +#endif /* 4,15,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#define NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#else /* >= 4,16,0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 4,16,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#define NEED_CONVERT_ART_NS_TO_TSC +#else /* >= 4,17,0 */ +#endif /* 4,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_MACVLAN_ACCEL_PRIV +#define NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#define NEED_MACVLAN_SUPPORTS_DEST_FILTER +#else /* >= 4,18,0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 4,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4,19,0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_DEVLINK_REGIONS +#define HAVE_TC_ETF_QOPT_OFFLOAD +#define HAVE_DEVLINK_PARAMS +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP +#endif /* 4,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#ifndef KYLIN_KERNEL +#define NEED_NETDEV_TX_SENT_QUEUE +#endif +#else /* >= 4.20.0 */ +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#endif /* 4.20.0 */ + +#if defined (UOS_KERNEL) || defined (KYLIN_KERNEL44) +#define HAVE_LINKMODE +#endif /* UOS_KERNEL */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#define NEED_INDIRECT_CALL_WRAPPER_MACROS +#else /* >= 5.0.0 */ +#define HAVE_GRETAP_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#define NEED_FLOW_MATCH +#else /* >= 5.1.0 */ +#define HAVE_ETHTOOL_200G_BITS +#define HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_DEVLINK_HEALTH +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#else /* >= 5.2.0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#define NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#define NEED_BUS_FIND_DEVICE_CONST_DATA +#else /* >= 5.3.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5,3,10)) +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif /* 5.3.10 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#ifndef KYLIN_KERNEL +#define NEED_SKB_FRAG_OFF_ADD +#define 
NEED_SKB_FRAG_OFF +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,14,241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.14.241 && < 4.15.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,200) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.19.200 && < 4.20.0 */ + +#define NEED_FLOW_INDR_BLOCK_CB_REGISTER +#else /* >= 5.4.0 */ +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +#else /* >= 5.5.0 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +#define NEED_DEVLINK_REGION_CREATE_OPS +#define NEED_CPU_LATENCY_QOS_RENAME +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_PTP_FIND_PIN_UNLOCKED +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#else /* >= 5.8.0 */ +#undef HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#define NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_XDP_QUERY_PROG +#define NEED_INDIRECT_CALL_3_AND_4 +#define NEED_MUL_U64_U64_DIV_U64 +#else /* >= 5.9.0 */ +#define HAVE_TASKLET_SETUP +#endif /* 5.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#define NEED_NET_PREFETCH +#define NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#define NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NEED_XSK_BUFF_POOL_RENAME +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_UDP_TUNNEL_NIC_SHARED +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_XSK_BATCHED_DESCRIPTOR_INTERFACES +#define HAVE_PASID_SUPPORT +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#define NEED_EXPORT_INDIRECT_CALLABLE +#else /* >= 5.12.0 */ +#undef HAVE_NDO_UDP_TUNNEL_CALLBACK +#define HAVE_DEVLINK_OPS_CREATE_DEL +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0)) +/* HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE + * + * create api changed as part of the commit c2ef2f50ad0c( vfio/mdev: Remove + * kobj from mdev_parent_ops->create()) + * + * if flag is defined use the old API else new API + */ +#define HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +#define HAVE_DEV_IN_MDEV_API +#else /* >= 5.13.0 */ +#define HAVE_XPS_MAP_TYPE +#undef NEED_ETHTOOL_SPRINTF +#endif /* 5.13.0 */ + 
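+/* Illustrative use of the HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE flag above
+ * (foo_mdev_create is a hypothetical callback):
+ *
+ *	#ifdef HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE
+ *	static int foo_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+ *	#else
+ *	static int foo_mdev_create(struct mdev_device *mdev)
+ *	#endif
+ */
+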
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)) +#else /* >= 5.14.0 */ +#define HAVE_TTY_WRITE_ROOM_UINT +#endif /* 5.14.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#define NEED_ETH_HW_ADDR_SET +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#define HAVE_LMV1_SUPPORT +#define NEED_PCI_IOV_VF_ID +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_XSK_BATCHED_RX_ALLOC +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_XDP_DO_FLUSH +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +#if defined(EULER_KERNEL) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* EULER_KERNEL */ + + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)) +#else /* >=5.18.0*/ +#undef HAVE_LMV1_SUPPORT +#undef NEED_PCI_IOV_VF_ID +#define HAVE_GTP_SUPPORT +#undef HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVL_PORT_REGISTER +#endif /* 5.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,19,0)) +#else /* >=5.19.0 */ +#define HAVE_NDO_FDB_DEL_EXTACK +#define HAVE_NETIF_SET_TSO_MAX +#endif /* 5.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,0,0)) +#else /* >=6.0.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_PPPOE +#endif /* 6.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_L2TPV3 +#undef NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define HAVE_TTY_TERMIOS_CONST_STRUCT +#endif /* 6.1.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#else /* >=6.2.0 */ +#define HAVE_SET_NETDEV_DEVLINK_PORT +#undef HAVE_NDO_GET_DEVLINK_PORT +#endif /* 6.2.0 */ + +#endif /* _KCOMPAT_STD_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h b/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h new file mode 100755 index 00000000000000..9b84e9d0304b40 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_UBUNTU_DEFS_H_ +#define _KCOMPAT_UBUNTU_DEFS_H_ + +/* This file contains the definitions for the Ubuntu specific distribution of + * the Linux kernel. + * + * It checks the UBUNTU_VERSION_CODE to decide which features are available in + * the target kernel. 
It assumes that kcompat_std_defs.h has already been + * processed, and will #define or #undef the relevant flags based on what + * features were backported by Ubuntu. + */ + +#if !UTS_UBUNTU_RELEASE_ABI +#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" +#endif + +#if !UBUNTU_VERSION_CODE +#error "UBUNTU_VERSION_CODE is 0 or undefined" +#endif + +#ifndef UBUNTU_VERSION +#error "UBUNTU_VERSION is undefined" +#endif + +/*****************************************************************************/ +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999)) +#undef NEED_SKB_FRAG_OFF +#endif + +/*****************************************************************************/ +#endif /* _KCOMPAT_UBUNTU_DEFS_H_ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 05cc07b8f48c03..0a30d9b406a16d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -121,6 +121,16 @@ config DWMAC_MESON the stmmac device driver. This driver is used for Meson6, Meson8, Meson8b and GXBB SoCs. +config DWMAC_PHYTIUM + tristate "Phytium dwmac support" + default ARCH_PHYTIUM + depends on (OF || ACPI) && (ARCH_PHYTIUM || COMPILE_TEST) + help + Support for GMAC controller on Phytium SoCs. + + This selects the Phytium GMAC glue layer support for the + stmmac device driver. + config DWMAC_QCOM_ETHQOS tristate "Qualcomm ETHQOS support" default ARCH_QCOM diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index c2f0e91f6bf83d..9ad873b60920ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o +obj-$(CONFIG_DWMAC_PHYTIUM) += dwmac-phytium.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c new file mode 100644 index 00000000000000..ea37d120113d11 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium DWMAC specific glue layer + * + * Copyright (c) 2022-2024 Phytium Technology Co., Ltd. 
+ * + * Chen Baozi + */ + +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +static int phytium_get_mac_mode(struct fwnode_handle *fwnode) +{ + const char *pm; + int err, i; + + err = fwnode_property_read_string(fwnode, "mac-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + +static int phytium_dwmac_acpi_phy(struct plat_stmmacenet_data *plat, + struct fwnode_handle *np, struct device *dev) +{ + plat->mdio_bus_data = devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); + + if (!plat->mdio_bus_data) + return -ENOMEM; + + return 0; +} + +static int phytium_dwmac_probe(struct platform_device *pdev) +{ + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + struct plat_stmmacenet_data *plat; + struct stmmac_resources stmmac_res; + struct device_node *np = pdev->dev.of_node; + u64 clk_freq; + char clk_name[20]; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->phy_interface = device_get_phy_mode(&pdev->dev); +#ifdef CONFIG_ACPI + static const struct acpi_device_id phytium_old_acpi_id[] = { + { .id = "FTGM0001" }, // compat FT2000/4 id + { } + }; + /* "phy-mode" in the Phytium platform DSDT is not correct on some old devices. + * Force the PHY mode to rgmii-rxid and log that the workaround is in use. + * If the rgmii phy-mode is really intended, a blacklist may need to be added. + */ + if (acpi_match_device_ids(to_acpi_device(&pdev->dev), phytium_old_acpi_id) && + plat->phy_interface == PHY_INTERFACE_MODE_RGMII) { + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_RXID; + dev_info(&pdev->dev, "phytium workaround: phy-mode from rgmii to rgmii-rxid\n"); + } +#endif + if (plat->phy_interface < 0) + return plat->phy_interface; + + plat->mac_interface = phytium_get_mac_mode(fwnode); + if (plat->mac_interface < 0) + plat->mac_interface = plat->phy_interface; + + /* Configure PHY if using device-tree */ + if (pdev->dev.of_node) { + plat->phy_node = of_parse_phandle(np, "phy-handle", 0); + plat->port_node = of_fwnode_handle(np); + } + + if (pdev->dev.of_node) { + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + } else if (fwnode_property_read_u32(fwnode, "bus_id", &plat->bus_id)) { + plat->bus_id = 2; + } + + plat->phy_addr = -1; + plat->clk_csr = -1; + plat->has_gmac = 1; + plat->enh_desc = 1; + plat->bugged_jumbo = 1; + plat->pmt = 1; + plat->force_sf_dma_mode = 1; + + if (fwnode_property_read_u32(fwnode, "max-speed", &plat->max_speed)) + plat->max_speed = -1; + + if (fwnode_property_read_u32(fwnode, "max-frame-size", &plat->maxmtu)) + plat->maxmtu = JUMBO_LEN; + + if (fwnode_property_read_u32(fwnode, "snps,multicast-filter-bins", + &plat->multicast_filter_bins)) + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + if (fwnode_property_read_u32(fwnode, "snps,perfect-filter-entries", + &plat->unicast_filter_entries)) + plat->unicast_filter_entries = 1; + + if (fwnode_property_read_u32(fwnode, "tx-fifo-depth", &plat->tx_fifo_size)) + plat->tx_fifo_size = 0x1000; + + if (fwnode_property_read_u32(fwnode, "rx-fifo-depth", &plat->rx_fifo_size)) + plat->rx_fifo_size = 0x1000; + + if 
(phytium_dwmac_acpi_phy(plat, fwnode, &pdev->dev)) + return -ENODEV; + + plat->rx_queues_to_use = 1; + plat->tx_queues_to_use = 1; + plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + + if (fwnode_property_read_u64(fwnode, "clock-frequency", &clk_freq)) + clk_freq = 125000000; + + /* Set system clock */ + snprintf(clk_name, sizeof(clk_name), "%s-%d", "stmmaceth", plat->bus_id); + + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, clk_name, NULL, 0, clk_freq); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Failed to register stmmac-clk\n"); + plat->stmmac_clk = NULL; + } + + ret = clk_prepare_enable(plat->stmmac_clk); + if (ret) { + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; + } + + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + + if (fwnode_property_read_u32(fwnode, "snps,pbl", &plat->dma_cfg->pbl)) + plat->dma_cfg->pbl = 16; + + fwnode_property_read_u32(fwnode, "snps,txpbl", &plat->dma_cfg->txpbl); + fwnode_property_read_u32(fwnode, "snps,rxpbl", &plat->dma_cfg->rxpbl); + + plat->dma_cfg->pblx8 = !fwnode_property_read_bool(fwnode, "snps,no-pbl-x8"); + plat->dma_cfg->aal = fwnode_property_read_bool(fwnode, "snps,aal"); + plat->dma_cfg->fixed_burst = fwnode_property_read_bool(fwnode, "snps,fixed-burst"); + plat->dma_cfg->mixed_burst = fwnode_property_read_bool(fwnode, "snps,mixed-burst"); +#ifdef CONFIG_ACPI + /* On some old Phytium FT-2000/4 (FTGM0001) devices the stmmac DMA settings + * cannot be set up automatically, and the kernel reports + * 'DMA descriptors allocation failed'. + */ + if (acpi_match_device_ids(to_acpi_device(&pdev->dev), phytium_old_acpi_id)) { + pdev->dev.dma_ops = NULL; // avoids the DMA mask setup failure + plat->host_dma_width = 32; + } +#endif + plat->axi->axi_lpi_en = false; + plat->axi->axi_xit_frm = false; + plat->axi->axi_wr_osr_lmt = 7; + plat->axi->axi_rd_osr_lmt = 7; + plat->axi->axi_blen[0] = 16; + + memset(&stmmac_res, 0, sizeof(stmmac_res)); + stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0); + stmmac_res.irq = platform_get_irq(pdev, 0); + if (stmmac_res.irq < 0) { + dev_err(&pdev->dev, "IRQ not found.\n"); + return -ENXIO; + } + stmmac_res.wol_irq = stmmac_res.irq; + stmmac_res.lpi_irq = -1; + + return stmmac_dvr_probe(&pdev->dev, plat, &stmmac_res); +} + +static void phytium_dwmac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct plat_stmmacenet_data *plat = priv->plat; + + stmmac_pltfr_remove(pdev); + clk_unregister_fixed_rate(plat->stmmac_clk); +} + +#ifdef CONFIG_OF +static const struct of_device_id phytium_dwmac_of_match[] = { + { .compatible = "phytium,gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_dwmac_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_dwmac_acpi_ids[] = { + { .id = "FTGM0001" }, // compat FT2000/4 id + { .id = "PHYT0004" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_dwmac_acpi_ids); +#endif + +static struct platform_driver phytium_dwmac_driver = { + .probe = phytium_dwmac_probe, + .remove = phytium_dwmac_remove, + .driver = { + .name = "phytium-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(phytium_dwmac_of_match), + .acpi_match_table = ACPI_PTR(phytium_dwmac_acpi_ids), + }, +}; +module_platform_driver(phytium_dwmac_driver); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium DWMAC specific glue layer"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c index bfd1bb7f022bd8..b57aa2d1f7a888 100644 --- a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -272,12 +272,12 @@ static void zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int va static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { - return pinctrl_gpio_direction_input(chip->base + offset); + return pinctrl_gpio_direction_input(chip, offset); } static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { - return pinctrl_gpio_direction_output(chip->base + offset); + return pinctrl_gpio_direction_output(chip, offset); } static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset) diff --git a/drivers/tty/serial/phytium-uart.c b/drivers/tty/serial/phytium-uart.c index 84a96350c3dc2c..5aa65635d56c59 100644 --- a/drivers/tty/serial/phytium-uart.c +++ b/drivers/tty/serial/phytium-uart.c @@ -232,7 +232,7 @@ static bool phytium_tx_char(struct phytium_uart_port *pup, unsigned char c, static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) { - struct circ_buf *xmit = &pup->port.state->xmit; + struct tty_port *tport = &pup->port.state->port; int count = pup->port.fifosize >> 1; if (pup->port.x_char) { @@ -241,7 +241,7 @@ static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) pup->port.x_char = 0; --count; } - if (uart_circ_empty(xmit) || uart_tx_stopped(&pup->port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&pup->port)) { phytium_stop_tx(&pup->port); return false; } @@ -250,16 +250,14 @@ static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) if (likely(from_irq) && count-- == 0) break; - if (!phytium_tx_char(pup, xmit->buf[xmit->tail], from_irq)) + if (!phytium_tx_char(pup, kfifo_len(&tport->xmit_fifo), from_irq)) break; + } while (!kfifo_is_empty(&tport->xmit_fifo)); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - } while (!uart_circ_empty(xmit)); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&pup->port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { phytium_stop_tx(&pup->port); return false; } diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c index 0e6f6f11749cb1..41c2b68a519247 100644 --- a/sound/pci/hda/hda_phytium.c +++ b/sound/pci/hda/hda_phytium.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "hda_controller.h" #include "hda_phytium.h"
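For context on the phytium-uart.c conversion above: once the serial core switched the per-port TX buffer from struct circ_buf to a kfifo in struct tty_port, pl011-style drivers drain it with a peek/skip loop, consuming a byte only after the hardware has accepted it. A minimal sketch in the phytium driver's terms (illustrative only, not the literal hunk; phytium_tx_chars_sketch is a made-up name, while phytium_tx_char(), phytium_stop_tx() and WAKEUP_CHARS come from the driver):

#include <linux/kfifo.h>
#include <linux/serial_core.h>
#include <linux/tty.h>

/* Illustrative kfifo-based TX loop; mirrors the upstream pl011 pattern. */
static bool phytium_tx_chars_sketch(struct phytium_uart_port *pup, bool from_irq)
{
	struct tty_port *tport = &pup->port.state->port;
	int count = pup->port.fifosize >> 1;
	unsigned char c;

	if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&pup->port)) {
		phytium_stop_tx(&pup->port);
		return false;
	}

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		/* Peek first; consume the byte only once the hardware took it. */
		if (!kfifo_peek(&tport->xmit_fifo, &c))
			break;

		if (!phytium_tx_char(pup, c, from_irq))
			break;

		kfifo_skip(&tport->xmit_fifo);
	} while (!kfifo_is_empty(&tport->xmit_fifo));

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&pup->port);

	if (kfifo_is_empty(&tport->xmit_fifo)) {
		phytium_stop_tx(&pup->port);
		return false;
	}

	return true;
}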